Diffstat (limited to 'drivers/vdpa/ifcvf')
-rw-r--r--  drivers/vdpa/ifcvf/Makefile      |   3
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c  | 432
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.h  | 134
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c  | 882
4 files changed, 1451 insertions, 0 deletions
diff --git a/drivers/vdpa/ifcvf/Makefile b/drivers/vdpa/ifcvf/Makefile
new file mode 100644
index 0000000000..d709915995
--- /dev/null
+++ b/drivers/vdpa/ifcvf/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IFCVF) += ifcvf.o
+ifcvf-$(CONFIG_IFCVF) += ifcvf_main.o ifcvf_base.o
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
new file mode 100644
index 0000000000..060f837a4f
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#include "ifcvf_base.h"
+
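+/*
+ * Program an MSI-X vector for a vq or for the config interrupt, then
+ * read the register back: per the virtio spec, the device reports
+ * VIRTIO_MSI_NO_VECTOR here if it failed to map the vector, so callers
+ * check the returned value against VIRTIO_MSI_NO_VECTOR.
+ */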
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite16(vector, &cfg->queue_msix_vector);
+
+ return vp_ioread16(&cfg->queue_msix_vector);
+}
+
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ vp_iowrite16(vector, &cfg->msix_config);
+
+ return vp_ioread16(&cfg->msix_config);
+}
+
+static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
+ struct virtio_pci_cap *cap)
+{
+ u32 length, offset;
+ u8 bar;
+
+ length = le32_to_cpu(cap->length);
+ offset = le32_to_cpu(cap->offset);
+ bar = cap->bar;
+
+ if (bar >= IFCVF_PCI_MAX_RESOURCE) {
+ IFCVF_DBG(hw->pdev,
+ "Invalid bar number %u to get capabilities\n", bar);
+ return NULL;
+ }
+
+ if (offset + length > pci_resource_len(hw->pdev, bar)) {
+ IFCVF_DBG(hw->pdev,
+ "offset(%u) + len(%u) overflows bar%u's capability\n",
+ offset, length, bar);
+ return NULL;
+ }
+
+ return hw->base[bar] + offset;
+}
+
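+/*
+ * Read a virtio PCI capability from config space one dword at a time;
+ * size is expected to be a multiple of 4 (struct virtio_pci_cap is
+ * 16 bytes).
+ */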
+static int ifcvf_read_config_range(struct pci_dev *dev,
+ uint32_t *val, int size, int where)
+{
+ int ret, i;
+
+ for (i = 0; i < size; i += 4) {
+ ret = pci_read_config_dword(dev, where + i, val + i / 4);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
+{
+ u16 queue_size;
+
+ vp_iowrite16(qid, &hw->common_cfg->queue_select);
+ queue_size = vp_ioread16(&hw->common_cfg->queue_size);
+
+ return queue_size;
+}
+
+/* This function returns the max allowed safe size for
+ * all virtqueues. It is the smallest max queue size
+ * supported by every virtqueue.
+ */
+u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
+{
+ u16 queue_size, max_size, qid;
+
+ max_size = ifcvf_get_vq_size(hw, 0);
+ for (qid = 1; qid < hw->nr_vring; qid++) {
+ queue_size = ifcvf_get_vq_size(hw, qid);
+ /* 0 means the queue is unavailable */
+ if (!queue_size)
+ continue;
+
+ max_size = min(queue_size, max_size);
+ }
+
+ return max_size;
+}
+
+int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
+{
+ struct virtio_pci_cap cap;
+ u16 notify_off;
+ int ret;
+ u8 pos;
+ u32 i;
+
+ ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
+ return -EIO;
+ }
+ hw->pdev = pdev;
+
+ while (pos) {
+ ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
+ sizeof(cap), pos);
+ if (ret < 0) {
+ IFCVF_ERR(pdev,
+ "Failed to get PCI capability at %x\n", pos);
+ break;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR)
+ goto next;
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
+ hw->common_cfg);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ pci_read_config_dword(pdev, pos + sizeof(cap),
+ &hw->notify_off_multiplier);
+ hw->notify_bar = cap.bar;
+ hw->notify_base = get_cap_addr(hw, &cap);
+ hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
+ le32_to_cpu(cap.offset);
+ IFCVF_DBG(pdev, "hw->notify_base = %p\n",
+ hw->notify_base);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cap_addr(hw, &cap);
+ hw->cap_dev_config_size = le32_to_cpu(cap.length);
+ IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->isr == NULL || hw->dev_cfg == NULL) {
+ IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
+ return -EIO;
+ }
+
+ hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
+ hw->vring = kzalloc(sizeof(struct vring_info) * hw->nr_vring, GFP_KERNEL);
+ if (!hw->vring)
+ return -ENOMEM;
+
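+	/* Per the virtio spec, vq i's doorbell register lives at
+	 * notify_base + queue_notify_off * notify_off_multiplier.
+	 */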
+ for (i = 0; i < hw->nr_vring; i++) {
+ vp_iowrite16(i, &hw->common_cfg->queue_select);
+ notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
+ hw->vring[i].notify_addr = hw->notify_base +
+ notify_off * hw->notify_off_multiplier;
+ hw->vring[i].notify_pa = hw->notify_base_pa +
+ notify_off * hw->notify_off_multiplier;
+ hw->vring[i].irq = -EINVAL;
+ }
+
+ hw->lm_cfg = hw->base[IFCVF_LM_BAR];
+
+ IFCVF_DBG(pdev,
+		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
+ hw->common_cfg, hw->notify_base, hw->isr,
+ hw->dev_cfg, hw->notify_off_multiplier);
+
+ hw->vqs_reused_irq = -EINVAL;
+ hw->config_irq = -EINVAL;
+
+ return 0;
+}
+
+u8 ifcvf_get_status(struct ifcvf_hw *hw)
+{
+ return vp_ioread8(&hw->common_cfg->device_status);
+}
+
+void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
+{
+ vp_iowrite8(status, &hw->common_cfg->device_status);
+}
+
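+/*
+ * Virtio reset handshake: write 0 to device_status, then poll until
+ * the device reports 0 back, confirming the reset has completed.
+ */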
+void ifcvf_reset(struct ifcvf_hw *hw)
+{
+ ifcvf_set_status(hw, 0);
+ while (ifcvf_get_status(hw))
+ msleep(1);
+}
+
+u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ u32 features_lo, features_hi;
+ u64 features;
+
+ vp_iowrite32(0, &cfg->device_feature_select);
+ features_lo = vp_ioread32(&cfg->device_feature);
+
+ vp_iowrite32(1, &cfg->device_feature_select);
+ features_hi = vp_ioread32(&cfg->device_feature);
+
+ features = ((u64)features_hi << 32) | features_lo;
+
+ return features;
+}
+
+/* return provisioned vDPA dev features */
+u64 ifcvf_get_dev_features(struct ifcvf_hw *hw)
+{
+ return hw->dev_features;
+}
+
+u64 ifcvf_get_driver_features(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ u32 features_lo, features_hi;
+ u64 features;
+
+	vp_iowrite32(0, &cfg->guest_feature_select);
+	features_lo = vp_ioread32(&cfg->guest_feature);
+
+	vp_iowrite32(1, &cfg->guest_feature_select);
+	features_hi = vp_ioread32(&cfg->guest_feature);
+
+ features = ((u64)features_hi << 32) | features_lo;
+
+ return features;
+}
+
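+/*
+ * vDPA relies on the platform IOMMU for DMA isolation, so any non-empty
+ * driver feature set must include VIRTIO_F_ACCESS_PLATFORM; an all-zero
+ * feature set (features not negotiated yet) is still accepted.
+ */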
+int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
+{
+ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
+ IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+{
+ u32 net_config_size = sizeof(struct virtio_net_config);
+ u32 blk_config_size = sizeof(struct virtio_blk_config);
+ u32 cap_size = hw->cap_dev_config_size;
+ u32 config_size;
+
+	/* If the on-board device config space is larger than
+	 * struct virtio_net/blk_config, return only the size the
+	 * spec defines; this is very unlikely and purely defensive.
+	 */
+ switch (hw->dev_type) {
+ case VIRTIO_ID_NET:
+ config_size = min(cap_size, net_config_size);
+ break;
+ case VIRTIO_ID_BLOCK:
+ config_size = min(cap_size, blk_config_size);
+ break;
+ default:
+ config_size = 0;
+ IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
+ }
+
+ return config_size;
+}
+
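+/*
+ * Copy out of the device config space byte by byte, and re-read if
+ * config_generation changed mid-copy so the caller never sees a torn
+ * snapshot.
+ */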
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
+ void *dst, int length)
+{
+ u8 old_gen, new_gen, *p;
+ int i;
+
+ WARN_ON(offset + length > hw->config_size);
+ do {
+ old_gen = vp_ioread8(&hw->common_cfg->config_generation);
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = vp_ioread8(hw->dev_cfg + offset + i);
+
+ new_gen = vp_ioread8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
+ const void *src, int length)
+{
+ const u8 *p;
+ int i;
+
+ p = src;
+ WARN_ON(offset + length > hw->config_size);
+ for (i = 0; i < length; i++)
+ vp_iowrite8(*p++, hw->dev_cfg + offset + i);
+}
+
+void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ vp_iowrite32(0, &cfg->guest_feature_select);
+ vp_iowrite32((u32)features, &cfg->guest_feature);
+
+ vp_iowrite32(1, &cfg->guest_feature_select);
+ vp_iowrite32(features >> 32, &cfg->guest_feature);
+}
+
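+/*
+ * Per-vq state (last_avail_idx) lives in the LM BAR at a 4-byte stride:
+ * &vq_state_region is a __le16 pointer, so "+ qid * 2" advances by
+ * qid * 2 16-bit slots, i.e. 4 bytes per queue.
+ */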
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
+{
+ struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
+ u16 last_avail_idx;
+
+ last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);
+
+ return last_avail_idx;
+}
+
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
+{
+ struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
+
+ vp_iowrite16(num, &lm_cfg->vq_state_region + qid * 2);
+
+ return 0;
+}
+
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite16(num, &cfg->queue_size);
+}
+
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+ u64 driver_area, u64 device_area)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+
+ return 0;
+}
+
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ u16 queue_enable;
+
+ vp_iowrite16(qid, &cfg->queue_select);
+ queue_enable = vp_ioread16(&cfg->queue_enable);
+
+ return (bool)queue_enable;
+}
+
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite16(ready, &cfg->queue_enable);
+}
+
+static void ifcvf_reset_vring(struct ifcvf_hw *hw)
+{
+ u16 qid;
+
+ for (qid = 0; qid < hw->nr_vring; qid++) {
+ hw->vring[qid].cb.callback = NULL;
+ hw->vring[qid].cb.private = NULL;
+ ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
+ }
+}
+
+static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
+{
+ hw->config_cb.callback = NULL;
+ hw->config_cb.private = NULL;
+ ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
+}
+
+static void ifcvf_synchronize_irq(struct ifcvf_hw *hw)
+{
+ u32 nvectors = hw->num_msix_vectors;
+ struct pci_dev *pdev = hw->pdev;
+ int i, irq;
+
+ for (i = 0; i < nvectors; i++) {
+ irq = pci_irq_vector(pdev, i);
+ if (irq >= 0)
+ synchronize_irq(irq);
+ }
+}
+
+void ifcvf_stop(struct ifcvf_hw *hw)
+{
+ ifcvf_synchronize_irq(hw);
+ ifcvf_reset_vring(hw);
+ ifcvf_reset_config_handler(hw);
+}
+
+void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
+{
+ vp_iowrite16(qid, hw->vring[qid].notify_addr);
+}
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
new file mode 100644
index 0000000000..b57849c643
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#ifndef _IFCVF_H_
+#define _IFCVF_H_
+
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
+#include <uapi/linux/virtio_net.h>
+#include <uapi/linux/virtio_blk.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_pci.h>
+#include <uapi/linux/vdpa.h>
+
+#define N3000_DEVICE_ID 0x1041
+#define N3000_SUBSYS_DEVICE_ID 0x001A
+
+#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
+#define IFCVF_PCI_MAX_RESOURCE 6
+
+#define IFCVF_LM_BAR 4
+
+#define IFCVF_ERR(pdev, fmt, ...) dev_err(&pdev->dev, fmt, ##__VA_ARGS__)
+#define IFCVF_DBG(pdev, fmt, ...) dev_dbg(&pdev->dev, fmt, ##__VA_ARGS__)
+#define IFCVF_INFO(pdev, fmt, ...) dev_info(&pdev->dev, fmt, ##__VA_ARGS__)
+
+/* every vq and the config interrupt each has its own vector */
+#define MSIX_VECTOR_PER_VQ_AND_CONFIG 1
+/* all vqs share one vector, the config interrupt has a separate vector */
+#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG 2
+/* all vqs and the config interrupt share one vector */
+#define MSIX_VECTOR_DEV_SHARED 3
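+/* Mode selection (see ifcvf_request_irq): with nr_vring + 1 vectors
+ * allocated the driver runs per-vq + config; with fewer, all vqs share
+ * vector 0 and the config interrupt gets vector 1; with a single
+ * vector, everything shares vector 0.
+ */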
+
+struct vring_info {
+ u16 last_avail_idx;
+ void __iomem *notify_addr;
+ phys_addr_t notify_pa;
+ u32 irq;
+ struct vdpa_callback cb;
+ char msix_name[256];
+};
+
+struct ifcvf_lm_cfg {
+ __le64 control;
+ __le64 status;
+ __le64 lm_mem_log_start_addr;
+ __le64 lm_mem_log_end_addr;
+ __le16 vq_state_region;
+};
+
+struct ifcvf_hw {
+ u8 __iomem *isr;
+ /* Live migration */
+ struct ifcvf_lm_cfg __iomem *lm_cfg;
+ /* Notification bar number */
+ u8 notify_bar;
+ u8 msix_vector_status;
+ /* virtio-net or virtio-blk device config size */
+ u32 config_size;
+	/* Notification bar address */
+ void __iomem *notify_base;
+ phys_addr_t notify_base_pa;
+ u32 notify_off_multiplier;
+ u32 dev_type;
+ u64 hw_features;
+ /* provisioned device features */
+ u64 dev_features;
+ struct virtio_pci_common_cfg __iomem *common_cfg;
+ void __iomem *dev_cfg;
+ struct vring_info *vring;
+ void __iomem * const *base;
+ char config_msix_name[256];
+ struct vdpa_callback config_cb;
+ int config_irq;
+ int vqs_reused_irq;
+ u16 nr_vring;
+	u32 num_msix_vectors;
+	/* VIRTIO_PCI_CAP_DEVICE_CFG size */
+	u32 cap_dev_config_size;
+ struct pci_dev *pdev;
+};
+
+struct ifcvf_adapter {
+ struct vdpa_device vdpa;
+ struct pci_dev *pdev;
+ struct ifcvf_hw *vf;
+};
+
+struct ifcvf_vdpa_mgmt_dev {
+ struct vdpa_mgmt_dev mdev;
+ struct ifcvf_hw vf;
+ struct ifcvf_adapter *adapter;
+ struct pci_dev *pdev;
+};
+
+int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
+void ifcvf_stop(struct ifcvf_hw *hw);
+void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
+ void *dst, int length);
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
+ const void *src, int length);
+u8 ifcvf_get_status(struct ifcvf_hw *hw);
+void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
+void ifcvf_reset(struct ifcvf_hw *hw);
+u64 ifcvf_get_dev_features(struct ifcvf_hw *hw);
+u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
+int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+ u64 driver_area, u64 device_area);
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
+void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features);
+u64 ifcvf_get_driver_features(struct ifcvf_hw *hw);
+u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw);
+#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
new file mode 100644
index 0000000000..e98fa8100f
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include "ifcvf_base.h"
+
+#define DRIVER_AUTHOR "Intel Corporation"
+#define IFCVF_DRIVER_NAME "ifcvf"
+
+static irqreturn_t ifcvf_config_changed(int irq, void *arg)
+{
+ struct ifcvf_hw *vf = arg;
+
+ if (vf->config_cb.callback)
+ return vf->config_cb.callback(vf->config_cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
+{
+ struct vring_info *vring = arg;
+
+ if (vring->cb.callback)
+ return vring->cb.callback(vring->cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
+{
+ struct ifcvf_hw *vf = arg;
+ struct vring_info *vring;
+ int i;
+
+ for (i = 0; i < vf->nr_vring; i++) {
+ vring = &vf->vring[i];
+ if (vring->cb.callback)
+ vring->cb.callback(vring->cb.private);
+ }
+
+ return IRQ_HANDLED;
+}
+
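+/* Single-vector mode: one handler demultiplexes the config-change
+ * interrupt via the ISR status byte, then kicks every vq callback.
+ */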
+static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
+{
+ struct ifcvf_hw *vf = arg;
+ u8 isr;
+
+ isr = vp_ioread8(vf->isr);
+ if (isr & VIRTIO_PCI_ISR_CONFIG)
+ ifcvf_config_changed(irq, arg);
+
+ return ifcvf_vqs_reused_intr_handler(irq, arg);
+}
+
+static void ifcvf_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+ int i;
+
+ for (i = 0; i < vf->nr_vring; i++) {
+ if (vf->vring[i].irq != -EINVAL) {
+ devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+ vf->vring[i].irq = -EINVAL;
+ }
+ }
+}
+
+static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+
+ if (vf->vqs_reused_irq != -EINVAL) {
+ devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
+ vf->vqs_reused_irq = -EINVAL;
+ }
+}
+
+static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
+{
+ if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+ ifcvf_free_per_vq_irq(vf);
+ else
+ ifcvf_free_vqs_reused_irq(vf);
+}
+
+static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+
+ if (vf->config_irq == -EINVAL)
+ return;
+
+	/* If the irq is shared by all vqs and the config interrupt,
+	 * it was already freed in ifcvf_free_vq_irq, so we only need
+	 * to free the config irq here when msix_vector_status !=
+	 * MSIX_VECTOR_DEV_SHARED.
+	 */
+ if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
+ devm_free_irq(&pdev->dev, vf->config_irq, vf);
+ vf->config_irq = -EINVAL;
+ }
+}
+
+static void ifcvf_free_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+
+ ifcvf_free_vq_irq(vf);
+ ifcvf_free_config_irq(vf);
+ ifcvf_free_irq_vectors(pdev);
+ vf->num_msix_vectors = 0;
+}
+
+/* ifcvf MSIX vector allocator. This helper tries to allocate
+ * vectors for all virtqueues and the config interrupt.
+ * It returns the number of allocated vectors on success, or a
+ * negative value on failure.
+ */
+static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+ int max_intr, ret;
+
+ /* all queues and config interrupt */
+ max_intr = vf->nr_vring + 1;
+ ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
+ return ret;
+ }
+
+	if (ret < max_intr)
+		IFCVF_INFO(pdev,
+			   "Requested %u vectors, but only %u were allocated; expect lower performance\n",
+			   max_intr, ret);
+
+ return ret;
+}
+
+static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+ int i, vector, ret, irq;
+
+ vf->vqs_reused_irq = -EINVAL;
+ for (i = 0; i < vf->nr_vring; i++) {
+		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d", pci_name(pdev), i);
+ vector = i;
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_vq_intr_handler, 0,
+ vf->vring[i].msix_name,
+ &vf->vring[i]);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
+ goto err;
+ }
+
+ vf->vring[i].irq = irq;
+ ret = ifcvf_set_vq_vector(vf, i, vector);
+ if (ret == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ ifcvf_free_irq(vf);
+
+ return -EFAULT;
+}
+
+static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+ int i, vector, ret, irq;
+
+ vector = 0;
+	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq", pci_name(pdev));
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_vqs_reused_intr_handler, 0,
+ vf->vring[0].msix_name, vf);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
+ goto err;
+ }
+
+ vf->vqs_reused_irq = irq;
+ for (i = 0; i < vf->nr_vring; i++) {
+ vf->vring[i].irq = -EINVAL;
+ ret = ifcvf_set_vq_vector(vf, i, vector);
+ if (ret == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ ifcvf_free_irq(vf);
+
+ return -EFAULT;
+}
+
+static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+ int i, vector, ret, irq;
+
+ vector = 0;
+	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq", pci_name(pdev));
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_dev_intr_handler, 0,
+ vf->vring[0].msix_name, vf);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request irq for the device\n");
+ goto err;
+ }
+
+ vf->vqs_reused_irq = irq;
+ for (i = 0; i < vf->nr_vring; i++) {
+ vf->vring[i].irq = -EINVAL;
+ ret = ifcvf_set_vq_vector(vf, i, vector);
+ if (ret == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+ goto err;
+ }
+ }
+
+ vf->config_irq = irq;
+ ret = ifcvf_set_config_vector(vf, vector);
+ if (ret == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(pdev, "No msix vector for device config\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ ifcvf_free_irq(vf);
+
+ return -EFAULT;
+}
+
+static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
+{
+ int ret;
+
+ if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+ ret = ifcvf_request_per_vq_irq(vf);
+ else
+ ret = ifcvf_request_vqs_reused_irq(vf);
+
+ return ret;
+}
+
+static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
+{
+ struct pci_dev *pdev = vf->pdev;
+ int config_vector, ret;
+
+ if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+ config_vector = vf->nr_vring;
+ else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+ /* vector 0 for vqs and 1 for config interrupt */
+ config_vector = 1;
+ else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+ /* re-use the vqs vector */
+ return 0;
+ else
+ return -EINVAL;
+
+	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
+ pci_name(pdev));
+ vf->config_irq = pci_irq_vector(pdev, config_vector);
+ ret = devm_request_irq(&pdev->dev, vf->config_irq,
+ ifcvf_config_changed, 0,
+ vf->config_msix_name, vf);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request config irq\n");
+ goto err;
+ }
+
+ ret = ifcvf_set_config_vector(vf, config_vector);
+ if (ret == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(pdev, "No msix vector for device config\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ ifcvf_free_irq(vf);
+
+ return -EFAULT;
+}
+
+static int ifcvf_request_irq(struct ifcvf_hw *vf)
+{
+ int nvectors, ret, max_intr;
+
+ nvectors = ifcvf_alloc_vectors(vf);
+ if (nvectors <= 0)
+ return -EFAULT;
+
+ vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
+ max_intr = vf->nr_vring + 1;
+ if (nvectors < max_intr)
+ vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;
+
+ if (nvectors == 1) {
+ vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
+ ret = ifcvf_request_dev_irq(vf);
+
+ return ret;
+ }
+
+ ret = ifcvf_request_vq_irq(vf);
+ if (ret)
+ return ret;
+
+	ret = ifcvf_request_config_irq(vf);
+	if (ret)
+ return ret;
+
+ vf->num_msix_vectors = nvectors;
+
+ return 0;
+}
+
+static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
+{
+ return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
+}
+
+static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+
+ return adapter->vf;
+}
+
+static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+ u32 type = vf->dev_type;
+ u64 features;
+
+	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK) {
+		features = ifcvf_get_dev_features(vf);
+	} else {
+		features = 0;
+		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
+	}
+
+ return features;
+}
+
+static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ int ret;
+
+ ret = ifcvf_verify_min_features(vf, features);
+ if (ret)
+ return ret;
+
+ ifcvf_set_driver_features(vf, features);
+
+ return 0;
+}
+
+static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ u64 features;
+
+ features = ifcvf_get_driver_features(vf);
+
+ return features;
+}
+
+static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_status(vf);
+}
+
+static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
+{
+ struct ifcvf_hw *vf;
+ u8 status_old;
+ int ret;
+
+ vf = vdpa_to_vf(vdpa_dev);
+ status_old = ifcvf_get_status(vf);
+
+ if (status_old == status)
+ return;
+
+ if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ ret = ifcvf_request_irq(vf);
+ if (ret) {
+ IFCVF_ERR(vf->pdev, "failed to request irq with error %d\n", ret);
+ return;
+ }
+ }
+
+ ifcvf_set_status(vf, status);
+}
+
+static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ u8 status = ifcvf_get_status(vf);
+
+ ifcvf_stop(vf);
+
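+	/* IRQs are only requested when DRIVER_OK is set (see
+	 * ifcvf_vdpa_set_status), so only free them if the device was
+	 * actually running.
+	 */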
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+ ifcvf_free_irq(vf);
+
+ ifcvf_reset(vf);
+
+ return 0;
+}
+
+static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_max_vq_size(vf);
+}
+
+static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_vq_state *state)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ state->split.avail_index = ifcvf_get_vq_state(vf, qid);
+ return 0;
+}
+
+static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ const struct vdpa_vq_state *state)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
+}
+
+static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].cb = *cb;
+}
+
+static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
+ u16 qid, bool ready)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ ifcvf_set_vq_ready(vf, qid, ready);
+}
+
+static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_vq_ready(vf, qid);
+}
+
+static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
+ u32 num)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ ifcvf_set_vq_num(vf, qid, num);
+}
+
+static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
+}
+
+static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ ifcvf_notify_queue(vf, qid);
+}
+
+static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vp_ioread8(&vf->common_cfg->config_generation);
+}
+
+static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->dev_type;
+}
+
+static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ return pdev->subsystem_vendor;
+}
+
+static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_QUEUE_ALIGNMENT;
+}
+
+static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->config_size;
+}
+
+static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ return 0;
+}
+
+static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ ifcvf_read_dev_config(vf, offset, buf, len);
+}
+
+static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ ifcvf_write_dev_config(vf, offset, buf, len);
+}
+
+static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
+ struct vdpa_callback *cb)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->config_cb.callback = cb->callback;
+ vf->config_cb.private = cb->private;
+}
+
+static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
+ u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ if (vf->vqs_reused_irq < 0)
+ return vf->vring[qid].irq;
+ else
+ return -EINVAL;
+}
+
+static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
+ u16 idx)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ struct vdpa_notification_area area;
+
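+	/* A zero notify_off_multiplier means all vqs share one doorbell
+	 * register, so expose a full page; otherwise each vq owns a
+	 * multiplier-sized window suitable for mapping to userspace.
+	 */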
+ area.addr = vf->vring[idx].notify_pa;
+ if (!vf->notify_off_multiplier)
+ area.size = PAGE_SIZE;
+ else
+ area.size = vf->notify_off_multiplier;
+
+ return area;
+}
+
+/*
+ * IFCVF currently doesn't have an on-chip IOMMU, so
+ * set_map()/dma_map()/dma_unmap() are not implemented.
+ */
+static const struct vdpa_config_ops ifc_vdpa_ops = {
+ .get_device_features = ifcvf_vdpa_get_device_features,
+ .set_driver_features = ifcvf_vdpa_set_driver_features,
+ .get_driver_features = ifcvf_vdpa_get_driver_features,
+ .get_status = ifcvf_vdpa_get_status,
+ .set_status = ifcvf_vdpa_set_status,
+ .reset = ifcvf_vdpa_reset,
+ .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
+ .get_vq_state = ifcvf_vdpa_get_vq_state,
+ .set_vq_state = ifcvf_vdpa_set_vq_state,
+ .set_vq_cb = ifcvf_vdpa_set_vq_cb,
+ .set_vq_ready = ifcvf_vdpa_set_vq_ready,
+ .get_vq_ready = ifcvf_vdpa_get_vq_ready,
+ .set_vq_num = ifcvf_vdpa_set_vq_num,
+ .set_vq_address = ifcvf_vdpa_set_vq_address,
+ .get_vq_irq = ifcvf_vdpa_get_vq_irq,
+ .kick_vq = ifcvf_vdpa_kick_vq,
+ .get_generation = ifcvf_vdpa_get_generation,
+ .get_device_id = ifcvf_vdpa_get_device_id,
+ .get_vendor_id = ifcvf_vdpa_get_vendor_id,
+ .get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_vq_group = ifcvf_vdpa_get_vq_group,
+ .get_config_size = ifcvf_vdpa_get_config_size,
+ .get_config = ifcvf_vdpa_get_config,
+ .set_config = ifcvf_vdpa_set_config,
+ .set_config_cb = ifcvf_vdpa_set_config_cb,
+ .get_vq_notification = ifcvf_get_vq_notification,
+};
+
+static struct virtio_device_id id_table_net[] = {
+ {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static struct virtio_device_id id_table_blk[] = {
+ {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static u32 get_dev_type(struct pci_dev *pdev)
+{
+ u32 dev_type;
+
+	/* This driver drives both modern virtio devices and transitional
+	 * devices in modern mode.
+	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM, so legacy
+	 * devices and transitional devices in legacy mode will not work
+	 * for vDPA; this driver will not drive devices with a legacy
+	 * interface.
+	 */
+
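+	/* e.g. a transitional virtio-net device has device ID 0x1000 and
+	 * carries VIRTIO_ID_NET (1) in its subsystem device ID, while a
+	 * modern virtio-net device has device ID 0x1041, and
+	 * 0x1041 - 0x1040 == VIRTIO_ID_NET too.
+	 */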
+ if (pdev->device < 0x1040)
+ dev_type = pdev->subsystem_device;
+ else
+ dev_type = pdev->device - 0x1040;
+
+ return dev_type;
+}
+
+static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct ifcvf_adapter *adapter;
+ struct vdpa_device *vdpa_dev;
+ struct pci_dev *pdev;
+ struct ifcvf_hw *vf;
+ u64 device_features;
+ int ret;
+
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ vf = &ifcvf_mgmt_dev->vf;
+ pdev = vf->pdev;
+ adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+ &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+ if (IS_ERR(adapter)) {
+ IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+ return PTR_ERR(adapter);
+ }
+
+ ifcvf_mgmt_dev->adapter = adapter;
+ adapter->pdev = pdev;
+ adapter->vdpa.dma_dev = &pdev->dev;
+ adapter->vdpa.mdev = mdev;
+ adapter->vf = vf;
+ vdpa_dev = &adapter->vdpa;
+
+ device_features = vf->hw_features;
+ if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ if (config->device_features & ~device_features) {
+ IFCVF_ERR(pdev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
+ config->device_features, device_features);
+ return -EINVAL;
+ }
+ device_features &= config->device_features;
+ }
+ vf->dev_features = device_features;
+
+ if (name)
+ ret = dev_set_name(&vdpa_dev->dev, "%s", name);
+ else
+ ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);
+
+ ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
+ if (ret) {
+ put_device(&adapter->vdpa.dev);
+ IFCVF_ERR(pdev, "Failed to register to vDPA bus");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ _vdpa_unregister_device(dev);
+ ifcvf_mgmt_dev->adapter = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
+ .dev_add = ifcvf_vdpa_dev_add,
+ .dev_del = ifcvf_vdpa_dev_del
+};
+
+static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct device *dev = &pdev->dev;
+ struct ifcvf_hw *vf;
+ u32 dev_type;
+ int ret, i;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to enable device\n");
+ return ret;
+ }
+ ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
+ IFCVF_DRIVER_NAME);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MMIO region\n");
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev, "No usable DMA configuration\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+			  "Failed to add devres for freeing irq vectors\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ vf = &ifcvf_mgmt_dev->vf;
+ vf->dev_type = get_dev_type(pdev);
+ vf->base = pcim_iomap_table(pdev);
+ vf->pdev = pdev;
+
+ ret = ifcvf_init_hw(vf, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+ goto err;
+ }
+
+ for (i = 0; i < vf->nr_vring; i++)
+ vf->vring[i].irq = -EINVAL;
+
+ vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
+ ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
+ ifcvf_mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
+
+ ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to initialize the management interfaces\n");
+ goto err;
+ }
+
+ pci_set_drvdata(pdev, ifcvf_mgmt_dev);
+
+ return 0;
+
+err:
+ kfree(ifcvf_mgmt_dev->vf.vring);
+ kfree(ifcvf_mgmt_dev);
+ return ret;
+}
+
+static void ifcvf_remove(struct pci_dev *pdev)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+
+ ifcvf_mgmt_dev = pci_get_drvdata(pdev);
+ vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
+ kfree(ifcvf_mgmt_dev->vf.vring);
+ kfree(ifcvf_mgmt_dev);
+}
+
+static struct pci_device_id ifcvf_pci_ids[] = {
+ /* N3000 network device */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ N3000_DEVICE_ID,
+ PCI_VENDOR_ID_INTEL,
+ N3000_SUBSYS_DEVICE_ID) },
+ /* C5000X-PL network device
+ * F2000X-PL network device
+ */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ VIRTIO_TRANS_ID_NET,
+ PCI_VENDOR_ID_INTEL,
+ VIRTIO_ID_NET) },
+ /* C5000X-PL block device */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ VIRTIO_TRANS_ID_BLOCK,
+ PCI_VENDOR_ID_INTEL,
+ VIRTIO_ID_BLOCK) },
+
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
+
+static struct pci_driver ifcvf_driver = {
+ .name = IFCVF_DRIVER_NAME,
+ .id_table = ifcvf_pci_ids,
+ .probe = ifcvf_probe,
+ .remove = ifcvf_remove,
+};
+
+module_pci_driver(ifcvf_driver);
+
+MODULE_LICENSE("GPL v2");