author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/vfio/pci
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/vfio/pci')
-rw-r--r--  drivers/vfio/pci/Kconfig                             62
-rw-r--r--  drivers/vfio/pci/Makefile                            13
-rw-r--r--  drivers/vfio/pci/hisilicon/Kconfig                   15
-rw-r--r--  drivers/vfio/pci/hisilicon/Makefile                   4
-rw-r--r--  drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c     1335
-rw-r--r--  drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h      115
-rw-r--r--  drivers/vfio/pci/mlx5/Kconfig                        10
-rw-r--r--  drivers/vfio/pci/mlx5/Makefile                        4
-rw-r--r--  drivers/vfio/pci/mlx5/cmd.c                        1362
-rw-r--r--  drivers/vfio/pci/mlx5/cmd.h                         137
-rw-r--r--  drivers/vfio/pci/mlx5/main.c                        696
-rw-r--r--  drivers/vfio/pci/trace.h                             98
-rw-r--r--  drivers/vfio/pci/vfio_pci.c                         276
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c                 1961
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c                   2565
-rw-r--r--  drivers/vfio/pci/vfio_pci_igd.c                     451
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c                   723
-rw-r--r--  drivers/vfio/pci/vfio_pci_priv.h                    104
-rw-r--r--  drivers/vfio/pci/vfio_pci_rdwr.c                    502
-rw-r--r--  drivers/vfio/pci/vfio_pci_zdev.c                    169
20 files changed, 10602 insertions, 0 deletions
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
new file mode 100644
index 000000000..f9d0c908e
--- /dev/null
+++ b/drivers/vfio/pci/Kconfig
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if PCI && MMU
+config VFIO_PCI_CORE
+ tristate
+ select VFIO_VIRQFD
+ select IRQ_BYPASS_MANAGER
+
+config VFIO_PCI_MMAP
+ def_bool y if !S390
+
+config VFIO_PCI_INTX
+ def_bool y if !S390
+
+config VFIO_PCI
+ tristate "Generic VFIO support for any PCI device"
+ select VFIO_PCI_CORE
+ help
+ Support for the generic PCI VFIO bus driver which can connect any
+ PCI device to the VFIO framework.
+
+ If you don't know what to do here, say N.
+
+if VFIO_PCI
+config VFIO_PCI_VGA
+ bool "Generic VFIO PCI support for VGA devices"
+ depends on X86 && VGA_ARB
+ help
+ Support for VGA extension to VFIO PCI. This exposes an additional
+ region on VGA devices for accessing legacy VGA addresses used by
+ BIOS and generic video drivers.
+
+ If you don't know what to do here, say N.
+
+config VFIO_PCI_IGD
+ bool "Generic VFIO PCI extensions for Intel graphics (GVT-d)"
+ depends on X86
+ default y
+ help
+ Support for Intel IGD specific extensions to enable direct
+ assignment to virtual machines. This includes exposing an IGD
+ specific firmware table and read-only copies of the host bridge
+ and LPC bridge config space.
+
+ To enable Intel IGD assignment through vfio-pci, say Y.
+endif
+
+config VFIO_PCI_ZDEV_KVM
+ bool "VFIO PCI extensions for s390x KVM passthrough"
+ depends on S390 && KVM
+ default y
+ help
+ Support s390x-specific extensions to enable support for enhancements
+ to KVM passthrough capabilities, such as interpretive execution of
+ zPCI instructions.
+
+ To enable s390x KVM vfio-pci extensions, say Y.
+
+source "drivers/vfio/pci/mlx5/Kconfig"
+
+source "drivers/vfio/pci/hisilicon/Kconfig"
+
+endif
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
new file mode 100644
index 000000000..24c524224
--- /dev/null
+++ b/drivers/vfio/pci/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+vfio-pci-core-y := vfio_pci_core.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
+vfio-pci-core-$(CONFIG_VFIO_PCI_ZDEV_KVM) += vfio_pci_zdev.o
+obj-$(CONFIG_VFIO_PCI_CORE) += vfio-pci-core.o
+
+vfio-pci-y := vfio_pci.o
+vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
+obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
+
+obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/
+
+obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
diff --git a/drivers/vfio/pci/hisilicon/Kconfig b/drivers/vfio/pci/hisilicon/Kconfig
new file mode 100644
index 000000000..5daa0f45d
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HISI_ACC_VFIO_PCI
+ tristate "VFIO PCI support for HiSilicon ACC devices"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on VFIO_PCI_CORE
+ depends on PCI_MSI
+ depends on CRYPTO_DEV_HISI_QM
+ depends on CRYPTO_DEV_HISI_HPRE
+ depends on CRYPTO_DEV_HISI_SEC2
+ depends on CRYPTO_DEV_HISI_ZIP
+ help
+ This provides generic PCI support for HiSilicon ACC devices
+ using the VFIO framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/hisilicon/Makefile b/drivers/vfio/pci/hisilicon/Makefile
new file mode 100644
index 000000000..c66b3783f
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisi-acc-vfio-pci.o
+hisi-acc-vfio-pci-y := hisi_acc_vfio_pci.o
+
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
new file mode 100644
index 000000000..39eeca18a
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -0,0 +1,1335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, HiSilicon Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/hisi_acc_qm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vfio.h>
+#include <linux/vfio_pci_core.h>
+#include <linux/anon_inodes.h>
+
+#include "hisi_acc_vfio_pci.h"
+
+/* Return 0 when the VM acc device is ready, -ETIMEDOUT on hardware timeout */
+static int qm_wait_dev_not_ready(struct hisi_qm *qm)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
+ val, !(val & 0x1), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US);
+}
+
+/*
+ * Each state Reg is checked 100 times,
+ * with a delay of 100 microseconds after each check
+ */
+static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
+{
+ int check_times = 0;
+ u32 state;
+
+ state = readl(qm->io_base + regs);
+ while (state && check_times < ERROR_CHECK_TIMEOUT) {
+ udelay(CHECK_DELAY_TIME);
+ state = readl(qm->io_base + regs);
+ check_times++;
+ }
+
+ return state;
+}
+
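+/* Read @nums consecutive 32-bit QM registers starting at @reg_addr into @data. */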
+static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
+ u32 *data, u8 nums)
+{
+ int i;
+
+ if (nums < 1 || nums > QM_REGS_MAX_LEN)
+ return -EINVAL;
+
+ for (i = 0; i < nums; i++) {
+ data[i] = readl(qm->io_base + reg_addr);
+ reg_addr += QM_REG_ADDR_OFFSET;
+ }
+
+ return 0;
+}
+
+static int qm_write_regs(struct hisi_qm *qm, u32 reg,
+ u32 *data, u8 nums)
+{
+ int i;
+
+ if (nums < 1 || nums > QM_REGS_MAX_LEN)
+ return -EINVAL;
+
+ for (i = 0; i < nums; i++)
+ writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);
+
+ return 0;
+}
+
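+/*
+ * Query the VF's SQC VFT via mailbox: the qp base is stored in @base and
+ * the qp count is returned (or a negative error).
+ */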
+static int qm_get_vft(struct hisi_qm *qm, u32 *base)
+{
+ u64 sqc_vft;
+ u32 qp_num;
+ int ret;
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+ if (ret)
+ return ret;
+
+ sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
+ QM_XQC_ADDR_OFFSET);
+ *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
+ qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
+ (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+
+ return qp_num;
+}
+
+static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
+{
+ int ret;
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
+ if (ret)
+ return ret;
+
+ *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
+ QM_XQC_ADDR_OFFSET);
+
+ return 0;
+}
+
+static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
+{
+ int ret;
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
+ if (ret)
+ return ret;
+
+ *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
+ QM_XQC_ADDR_OFFSET);
+
+ return 0;
+}
+
+static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
+ &vf_data->ifc_int_source, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
+ return ret;
+ }
+
+ ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_PAGE_SIZE\n");
+ return ret;
+ }
+
+ /* QM_EQC_DW has 7 regs */
+ ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to read QM_EQC_DW\n");
+ return ret;
+ }
+
+ /* QM_AEQC_DW has 7 regs */
+ ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to read QM_AEQC_DW\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ /* Check VF state */
+ if (unlikely(hisi_qm_wait_mb_ready(qm))) {
+ dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
+ return -EBUSY;
+ }
+
+ ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
+ &vf_data->ifc_int_source, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
+ return ret;
+ }
+
+ ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_PAGE_SIZE\n");
+ return ret;
+ }
+
+ /* QM_EQC_DW has 7 regs */
+ ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to write QM_EQC_DW\n");
+ return ret;
+ }
+
+ /* QM_AEQC_DW has 7 regs */
+ ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
+ if (ret) {
+ dev_err(dev, "failed to write QM_AEQC_DW\n");
+ return ret;
+ }
+
+ return 0;
+}
+
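+/* Compose and ring a QM doorbell for queue @qn. */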
+static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
+ u16 index, u8 priority)
+{
+ u64 doorbell;
+ u64 dbase;
+ u16 randata = 0;
+
+ if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
+ dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
+ else
+ dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
+
+ doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
+ ((u64)randata << QM_DB_RAND_SHIFT_V2) |
+ ((u64)index << QM_DB_INDEX_SHIFT_V2) |
+ ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
+
+ writeq(doorbell, qm->io_base + dbase);
+}
+
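+/*
+ * Read the SQC VFT of VF @vf_id through the PF's VFT config interface;
+ * the qp base is stored in @rbase and the qp count is returned.
+ */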
+static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
+{
+ unsigned int val;
+ u64 sqc_vft;
+ u32 qp_num;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
+ /* 0 means SQC VFT */
+ writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
+ writel(vf_id, qm->io_base + QM_VFT_CFG);
+
+ writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
+ ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
+ QM_XQC_ADDR_OFFSET);
+ *rbase = QM_SQC_VFT_BASE_MASK_V2 &
+ (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
+ qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
+ (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+
+ return qp_num;
+}
+
+static void qm_dev_cmd_init(struct hisi_qm *qm)
+{
+ /* Clear VF communication status registers. */
+ writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);
+
+ /* Enable pf and vf communication. */
+ writel(0x0, qm->io_base + QM_IFC_INT_MASK);
+}
+
+static int vf_qm_cache_wb(struct hisi_qm *qm)
+{
+ unsigned int val;
+
+ writel(0x1, qm->io_base + QM_CACHE_WB_START);
+ if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+ val, val & BIT(0), MB_POLL_PERIOD_US,
+ MB_POLL_TIMEOUT_US)) {
+ dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vf_qm_fun_reset(struct hisi_qm *qm)
+{
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++)
+ qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
+}
+
+static int vf_qm_func_stop(struct hisi_qm *qm)
+{
+ return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
+}
+
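+/*
+ * Sanity-check the incoming migration data against the target VF: the
+ * magic, device id, qp count and queue isolation state must all match.
+ */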
+static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
+ struct device *dev = &vf_qm->pdev->dev;
+ u32 que_iso_state;
+ int ret;
+
+ if (migf->total_length < QM_MATCH_SIZE)
+ return -EINVAL;
+
+ if (vf_data->acc_magic != ACC_DEV_MAGIC) {
+ dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
+ return -EINVAL;
+ }
+
+ if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
+ dev_err(dev, "failed to match VF devices\n");
+ return -EINVAL;
+ }
+
+ /* VF qp num check */
+ ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
+ if (ret <= 0) {
+ dev_err(dev, "failed to get vft qp nums\n");
+ return -EINVAL;
+ }
+
+ if (ret != vf_data->qp_num) {
+ dev_err(dev, "failed to match VF qp num\n");
+ return -EINVAL;
+ }
+
+ vf_qm->qp_num = ret;
+
+ /* VF isolation state check */
+ ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
+ return ret;
+ }
+
+ if (vf_data->que_iso_cfg != que_iso_state) {
+ dev_err(dev, "failed to match isolation state\n");
+ return -EINVAL;
+ }
+
+ ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_STATE\n");
+ return ret;
+ }
+
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+ return 0;
+}
+
+static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct acc_vf_data *vf_data)
+{
+ struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
+ struct device *dev = &pf_qm->pdev->dev;
+ int vf_id = hisi_acc_vdev->vf_id;
+ int ret;
+
+ vf_data->acc_magic = ACC_DEV_MAGIC;
+ /* Save device id */
+ vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
+
+ /* Save the VF qp num from the PF */
+ ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
+ if (ret <= 0) {
+ dev_err(dev, "failed to get vft qp nums!\n");
+ return -EINVAL;
+ }
+
+ vf_data->qp_num = ret;
+
+ /* Save the VF isolation state from the PF */
+ ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
+ if (ret) {
+ dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
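+/*
+ * Restore the saved QM state to the VF: DMA addresses, the RW register
+ * set, and the SQC/CQC base tables (programmed via mailbox).
+ */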
+static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
+ struct device *dev = &qm->pdev->dev;
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ int ret;
+
+ /* Return if only match data was transferred */
+ if (migf->total_length == QM_MATCH_SIZE)
+ return 0;
+
+ if (migf->total_length < sizeof(struct acc_vf_data))
+ return -EINVAL;
+
+ qm->eqe_dma = vf_data->eqe_dma;
+ qm->aeqe_dma = vf_data->aeqe_dma;
+ qm->sqc_dma = vf_data->sqc_dma;
+ qm->cqc_dma = vf_data->cqc_dma;
+
+ qm->qp_base = vf_data->qp_base;
+ qm->qp_num = vf_data->qp_num;
+
+ ret = qm_set_regs(qm, vf_data);
+ if (ret) {
+ dev_err(dev, "set VF regs failed\n");
+ return ret;
+ }
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
+ if (ret) {
+ dev_err(dev, "set sqc failed\n");
+ return ret;
+ }
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
+ if (ret) {
+ dev_err(dev, "set cqc failed\n");
+ return ret;
+ }
+
+ qm_dev_cmd_init(qm);
+ return 0;
+}
+
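+/*
+ * Save the VF QM state into the migration file. If the device is not
+ * ready, only the match data is recorded.
+ */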
+static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ struct device *dev = &vf_qm->pdev->dev;
+ int ret;
+
+ ret = vf_qm_get_match_data(hisi_acc_vdev, vf_data);
+ if (ret)
+ return ret;
+
+ if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
+ /* Update state and return with match data */
+ vf_data->vf_qm_state = QM_NOT_READY;
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+ migf->total_length = QM_MATCH_SIZE;
+ return 0;
+ }
+
+ vf_data->vf_qm_state = QM_READY;
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+
+ ret = vf_qm_cache_wb(vf_qm);
+ if (ret) {
+ dev_err(dev, "failed to writeback QM Cache!\n");
+ return ret;
+ }
+
+ ret = qm_get_regs(vf_qm, vf_data);
+ if (ret)
+ return -EINVAL;
+
+ /* Every reg is 32 bit, the dma address is 64 bit. */
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
+ vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
+ vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
+
+ /* Use the SQC_BT/CQC_BT mailbox commands to get the sqc and cqc addresses */
+ ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
+ if (ret) {
+ dev_err(dev, "failed to read SQC addr!\n");
+ return -EINVAL;
+ }
+
+ ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
+ if (ret) {
+ dev_err(dev, "failed to read CQC addr!\n");
+ return -EINVAL;
+ }
+
+ migf->total_length = sizeof(struct acc_vf_data);
+ return 0;
+}
+
+static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ return container_of(core_device, struct hisi_acc_vf_core_device,
+ core_device);
+}
+
+/* Check the PF's RAS state and Function INT state */
+static int
+hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
+ struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
+ struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
+ struct device *dev = &qm->pdev->dev;
+ u32 state;
+
+ /* Check RAS state */
+ state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM RAS state!\n");
+ return -EBUSY;
+ }
+
+ /* Check Function Communication state between PF and VF */
+ state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM IFC INT state!\n");
+ return -EBUSY;
+ }
+ state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
+ if (state) {
+ dev_err(dev, "failed to check QM IFC INT SET state!\n");
+ return -EBUSY;
+ }
+
+ /* Check submodule task state */
+ switch (vf_pdev->device) {
+ case PCI_DEVICE_ID_HUAWEI_SEC_VF:
+ state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM SEC Core INT state!\n");
+ return -EBUSY;
+ }
+ return 0;
+ case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
+ state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
+ return -EBUSY;
+ }
+ return 0;
+ case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
+ state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
+ if (state) {
+ dev_err(dev, "failed to check QM ZIP Core INT state!\n");
+ return -EBUSY;
+ }
+ return 0;
+ default:
+ dev_err(dev, "failed to detect acc module type!\n");
+ return -EINVAL;
+ }
+}
+
+static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
+{
+ mutex_lock(&migf->lock);
+ migf->disabled = true;
+ migf->total_length = 0;
+ migf->filp->f_pos = 0;
+ mutex_unlock(&migf->lock);
+}
+
+static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ if (hisi_acc_vdev->resuming_migf) {
+ hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
+ fput(hisi_acc_vdev->resuming_migf->filp);
+ hisi_acc_vdev->resuming_migf = NULL;
+ }
+
+ if (hisi_acc_vdev->saving_migf) {
+ hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
+ fput(hisi_acc_vdev->saving_migf->filp);
+ hisi_acc_vdev->saving_migf = NULL;
+ }
+}
+
+/*
+ * This function is called in all state_mutex unlock cases to
+ * handle a 'deferred_reset' if one exists.
+ */
+static void
+hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+again:
+ spin_lock(&hisi_acc_vdev->reset_lock);
+ if (hisi_acc_vdev->deferred_reset) {
+ hisi_acc_vdev->deferred_reset = false;
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+ hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ goto again;
+ }
+ mutex_unlock(&hisi_acc_vdev->state_mutex);
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+}
+
+static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+
+ if (hisi_acc_vdev->vf_qm_state != QM_READY)
+ return;
+
+ /* Make sure the device is enabled */
+ qm_dev_cmd_init(vf_qm);
+
+ vf_qm_fun_reset(vf_qm);
+}
+
+static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct device *dev = &hisi_acc_vdev->vf_dev->dev;
+ struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
+ int ret;
+
+ /* Check dev compatibility */
+ ret = vf_qm_check_match(hisi_acc_vdev, migf);
+ if (ret) {
+ dev_err(dev, "failed to match the VF!\n");
+ return ret;
+ }
+ /* Recover data to VF */
+ ret = vf_qm_load_data(hisi_acc_vdev, migf);
+ if (ret) {
+ dev_err(dev, "failed to recover the VF!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
+{
+ struct hisi_acc_vf_migration_file *migf = filp->private_data;
+
+ hisi_acc_vf_disable_fd(migf);
+ mutex_destroy(&migf->lock);
+ kfree(migf);
+ return 0;
+}
+
+static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct hisi_acc_vf_migration_file *migf = filp->private_data;
+ loff_t requested_length;
+ ssize_t done = 0;
+ int ret;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ if (*pos < 0 ||
+ check_add_overflow((loff_t)len, *pos, &requested_length))
+ return -EINVAL;
+
+ if (requested_length > sizeof(struct acc_vf_data))
+ return -ENOMEM;
+
+ mutex_lock(&migf->lock);
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = copy_from_user(&migf->vf_data, buf, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += len;
+ done = len;
+ migf->total_length += len;
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations hisi_acc_vf_resume_fops = {
+ .owner = THIS_MODULE,
+ .write = hisi_acc_vf_resume_write,
+ .release = hisi_acc_vf_release_file,
+ .llseek = no_llseek,
+};
+
+static struct hisi_acc_vf_migration_file *
+hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_acc_vf_migration_file *migf;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
+ O_WRONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+ return migf;
+}
+
+static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
+ loff_t *pos)
+{
+ struct hisi_acc_vf_migration_file *migf = filp->private_data;
+ ssize_t done = 0;
+ int ret;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ mutex_lock(&migf->lock);
+ if (*pos > migf->total_length) {
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ len = min_t(size_t, migf->total_length - *pos, len);
+ if (len) {
+ ret = copy_to_user(buf, &migf->vf_data, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += len;
+ done = len;
+ }
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations hisi_acc_vf_save_fops = {
+ .owner = THIS_MODULE,
+ .read = hisi_acc_vf_save_read,
+ .release = hisi_acc_vf_release_file,
+ .llseek = no_llseek,
+};
+
+static struct hisi_acc_vf_migration_file *
+hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct hisi_acc_vf_migration_file *migf;
+ int ret;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
+ O_RDONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+
+ ret = vf_qm_state_save(hisi_acc_vdev, migf);
+ if (ret) {
+ fput(migf->filp);
+ return ERR_PTR(ret);
+ }
+
+ return migf;
+}
+
+static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct device *dev = &hisi_acc_vdev->vf_dev->dev;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ int ret;
+
+ ret = vf_qm_func_stop(vf_qm);
+ if (ret) {
+ dev_err(dev, "failed to stop QM VF function!\n");
+ return ret;
+ }
+
+ ret = hisi_acc_check_int_state(hisi_acc_vdev);
+ if (ret) {
+ dev_err(dev, "failed to check QM INT state!\n");
+ return ret;
+ }
+ return 0;
+}
+
+static struct file *
+hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ u32 new)
+{
+ u32 cur = hisi_acc_vdev->mig_state;
+ int ret;
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
+ struct hisi_acc_vf_migration_file *migf;
+
+ migf = hisi_acc_vf_stop_copy(hisi_acc_vdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ hisi_acc_vdev->saving_migf = migf;
+ return migf->filp;
+ }
+
+ if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
+ struct hisi_acc_vf_migration_file *migf;
+
+ migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ hisi_acc_vdev->resuming_migf = migf;
+ return migf->filp;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = hisi_acc_vf_load_state(hisi_acc_vdev);
+ if (ret)
+ return ERR_PTR(ret);
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
+ hisi_acc_vf_start_device(hisi_acc_vdev);
+ return NULL;
+ }
+
+ /*
+ * vfio_mig_get_next_state() does not use arcs other than the above
+ */
+ WARN_ON(true);
+ return ERR_PTR(-EINVAL);
+}
+
+static struct file *
+hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state new_state)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ enum vfio_device_mig_state next_state;
+ struct file *res = NULL;
+ int ret;
+
+ mutex_lock(&hisi_acc_vdev->state_mutex);
+ while (new_state != hisi_acc_vdev->mig_state) {
+ ret = vfio_mig_get_next_state(vdev,
+ hisi_acc_vdev->mig_state,
+ new_state, &next_state);
+ if (ret) {
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+
+ res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
+ if (IS_ERR(res))
+ break;
+ hisi_acc_vdev->mig_state = next_state;
+ if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
+ fput(res);
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+ return res;
+}
+
+static int
+hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state *curr_state)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+
+ mutex_lock(&hisi_acc_vdev->state_mutex);
+ *curr_state = hisi_acc_vdev->mig_state;
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+ return 0;
+}
+
+static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
+
+ if (hisi_acc_vdev->core_device.vdev.migration_flags !=
+ VFIO_MIGRATION_STOP_COPY)
+ return;
+
+ /*
+ * As the higher VFIO layers are holding locks across reset and using
+ * those same locks together with the mm_lock, we need to prevent an ABBA
+ * deadlock between the state_mutex and the mm_lock.
+ * In case the state_mutex was taken already we defer the cleanup work
+ * to the unlock flow of the other running context.
+ */
+ spin_lock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vdev->deferred_reset = true;
+ if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ return;
+ }
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+}
+
+static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+ struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ struct pci_dev *vf_dev = vdev->pdev;
+
+ /*
+ * ACC VF dev BAR2 region consists of both functional register space
+ * and migration control register space. For migration to work, we
+ * need access to both. Hence, we map the entire BAR2 region here.
+ * But unnecessarily exposing the migration BAR region to the Guest
+ * has the potential to prevent/corrupt the Guest migration. Hence,
+ * we restrict access to the migration control space from
+ * the Guest (please see the mmap/ioctl/read/write override functions).
+ *
+ * Please note that it is OK to expose the entire VF BAR if migration
+ * is not supported or required as this cannot affect the ACC PF
+ * configurations.
+ *
+ * Also the HiSilicon ACC VF devices supported by this driver on
+ * HiSilicon hardware platforms are integrated endpoint devices
+ * and the platform lacks the capability to perform any PCIe P2P
+ * between these devices.
+ */
+
+ vf_qm->io_base =
+ ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
+ pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
+ if (!vf_qm->io_base)
+ return -EIO;
+
+ vf_qm->fun_type = QM_HW_VF;
+ vf_qm->pdev = vf_dev;
+ mutex_init(&vf_qm->mailbox_lock);
+
+ return 0;
+}
+
+static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm;
+ struct pci_driver *pf_driver;
+
+ if (!pdev->is_virtfn)
+ return NULL;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_HUAWEI_SEC_VF:
+ pf_driver = hisi_sec_get_pf_driver();
+ break;
+ case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
+ pf_driver = hisi_hpre_get_pf_driver();
+ break;
+ case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
+ pf_driver = hisi_zip_get_pf_driver();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!pf_driver)
+ return NULL;
+
+ pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);
+
+ return !IS_ERR(pf_qm) ? pf_qm : NULL;
+}
+
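+/*
+ * BAR2 is split in half: the lower half is the functional register space
+ * exposed to the Guest, the upper half holds the migration control
+ * registers. Reject accesses that start in the migration control half and
+ * clamp those that would cross into it.
+ */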
+static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
+ size_t count, loff_t *ppos,
+ size_t *new_count)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ if (index == VFIO_PCI_BAR2_REGION_INDEX) {
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+
+ /* Check if access is for migration control region */
+ if (pos >= end)
+ return -EINVAL;
+
+ *new_count = min(count, (size_t)(end - pos));
+ }
+
+ return 0;
+}
+
+static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ unsigned int index;
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index == VFIO_PCI_BAR2_REGION_INDEX) {
+ u64 req_len, pgoff, req_start;
+ resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+
+ req_len = vma->vm_end - vma->vm_start;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
+ if (req_start + req_len > end)
+ return -EINVAL;
+ }
+
+ return vfio_pci_core_mmap(core_vdev, vma);
+}
+
+static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ size_t new_count = count;
+ int ret;
+
+ ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
+ if (ret)
+ return ret;
+
+ return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
+}
+
+static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ size_t new_count = count;
+ int ret;
+
+ ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
+ if (ret)
+ return ret;
+
+ return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
+}
+
+static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_region_info info;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+
+ /*
+ * ACC VF dev BAR2 region consists of both functional
+ * register space and migration control register space.
+ * Report only the functional region to Guest.
+ */
+ info.size = pci_resource_len(pdev, info.index) / 2;
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ }
+ }
+ return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+}
+
+static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
+ int ret;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ if (core_vdev->mig_ops) {
+ ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
+ if (ret) {
+ vfio_pci_core_disable(vdev);
+ return ret;
+ }
+ hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ }
+
+ vfio_pci_core_finish_enable(vdev);
+ return 0;
+}
+
+static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+
+ iounmap(vf_qm->io_base);
+ vfio_pci_core_close_device(core_vdev);
+}
+
+static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
+ .migration_set_state = hisi_acc_vfio_pci_set_device_state,
+ .migration_get_state = hisi_acc_vfio_pci_get_device_state,
+};
+
+static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
+ struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);
+
+ hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
+ hisi_acc_vdev->pf_qm = pf_qm;
+ hisi_acc_vdev->vf_dev = pdev;
+ mutex_init(&hisi_acc_vdev->state_mutex);
+
+ core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY;
+ core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;
+
+ return vfio_pci_core_init_dev(core_vdev);
+}
+
+static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
+ .name = "hisi-acc-vfio-pci-migration",
+ .init = hisi_acc_vfio_pci_migrn_init_dev,
+ .release = vfio_pci_core_release_dev,
+ .open_device = hisi_acc_vfio_pci_open_device,
+ .close_device = hisi_acc_vfio_pci_close_device,
+ .ioctl = hisi_acc_vfio_pci_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = hisi_acc_vfio_pci_read,
+ .write = hisi_acc_vfio_pci_write,
+ .mmap = hisi_acc_vfio_pci_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+};
+
+static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
+ .name = "hisi-acc-vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
+ .open_device = hisi_acc_vfio_pci_open_device,
+ .close_device = vfio_pci_core_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+};
+
+static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev;
+ const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
+ struct hisi_qm *pf_qm;
+ int vf_id;
+ int ret;
+
+ pf_qm = hisi_acc_get_pf_qm(pdev);
+ if (pf_qm && pf_qm->ver >= QM_HW_V3) {
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id >= 0)
+ ops = &hisi_acc_vfio_pci_migrn_ops;
+ else
+ pci_warn(pdev, "migration support failed, continue with generic interface\n");
+ }
+
+ hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
+ core_device.vdev, &pdev->dev, ops);
+ if (IS_ERR(hisi_acc_vdev))
+ return PTR_ERR(hisi_acc_vdev);
+
+ dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
+ ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
+ if (ret)
+ goto out_put_vdev;
+ return 0;
+
+out_put_vdev:
+ vfio_put_device(&hisi_acc_vdev->core_device.vdev);
+ return ret;
+}
+
+static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
+
+ vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
+ vfio_put_device(&hisi_acc_vdev->core_device.vdev);
+}
+
+static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
+
+static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
+ .reset_done = hisi_acc_vf_pci_aer_reset_done,
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver hisi_acc_vfio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = hisi_acc_vfio_pci_table,
+ .probe = hisi_acc_vfio_pci_probe,
+ .remove = hisi_acc_vfio_pci_remove,
+ .err_handler = &hisi_acc_vf_err_handlers,
+ .driver_managed_dma = true,
+};
+
+module_pci_driver(hisi_acc_vfio_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
+MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
new file mode 100644
index 000000000..67343325b
--- /dev/null
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon Ltd. */
+
+#ifndef HISI_ACC_VFIO_PCI_H
+#define HISI_ACC_VFIO_PCI_H
+
+#include <linux/hisi_acc_qm.h>
+
+#define MB_POLL_PERIOD_US 10
+#define MB_POLL_TIMEOUT_US 1000
+#define QM_CACHE_WB_START 0x204
+#define QM_CACHE_WB_DONE 0x208
+#define QM_MB_CMD_PAUSE_QM 0xe
+#define QM_ABNORMAL_INT_STATUS 0x100008
+#define QM_IFC_INT_STATUS 0x0028
+#define SEC_CORE_INT_STATUS 0x301008
+#define HPRE_HAC_INT_STATUS 0x301800
+#define HZIP_CORE_INT_STATUS 0x3010AC
+
+#define QM_VFT_CFG_RDY 0x10006c
+#define QM_VFT_CFG_OP_WR 0x100058
+#define QM_VFT_CFG_TYPE 0x10005c
+#define QM_VFT_CFG 0x100060
+#define QM_VFT_CFG_OP_ENABLE 0x100054
+#define QM_VFT_CFG_DATA_L 0x100064
+#define QM_VFT_CFG_DATA_H 0x100068
+
+#define ERROR_CHECK_TIMEOUT 100
+#define CHECK_DELAY_TIME 100
+
+#define QM_SQC_VFT_BASE_SHIFT_V2 28
+#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
+#define QM_SQC_VFT_NUM_SHIFT_V2 45
+#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+
+/* RW regs */
+#define QM_REGS_MAX_LEN 7
+#define QM_REG_ADDR_OFFSET 0x0004
+
+#define QM_XQC_ADDR_OFFSET 32U
+#define QM_VF_AEQ_INT_MASK 0x0004
+#define QM_VF_EQ_INT_MASK 0x000c
+#define QM_IFC_INT_SOURCE_V 0x0020
+#define QM_IFC_INT_MASK 0x0024
+#define QM_IFC_INT_SET_V 0x002c
+#define QM_QUE_ISO_CFG_V 0x0030
+#define QM_PAGE_SIZE 0x0034
+
+#define QM_EQC_DW0 0X8000
+#define QM_AEQC_DW0 0X8020
+
+struct acc_vf_data {
+#define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
+ /* QM match information */
+#define ACC_DEV_MAGIC 0XCDCDCDCDFEEDAACC
+ u64 acc_magic;
+ u32 qp_num;
+ u32 dev_id;
+ u32 que_iso_cfg;
+ u32 qp_base;
+ u32 vf_qm_state;
+ /* QM reserved match information */
+ u32 qm_rsv_state[3];
+
+ /* QM RW regs */
+ u32 aeq_int_mask;
+ u32 eq_int_mask;
+ u32 ifc_int_source;
+ u32 ifc_int_mask;
+ u32 ifc_int_set;
+ u32 page_size;
+
+ /* QM_EQC_DW has 7 regs */
+ u32 qm_eqc_dw[7];
+
+ /* QM_AEQC_DW has 7 regs */
+ u32 qm_aeqc_dw[7];
+
+ /* QM reserved 5 regs */
+ u32 qm_rsv_regs[5];
+ u32 padding;
+ /* QM memory init information */
+ u64 eqe_dma;
+ u64 aeqe_dma;
+ u64 sqc_dma;
+ u64 cqc_dma;
+};
+
+struct hisi_acc_vf_migration_file {
+ struct file *filp;
+ struct mutex lock;
+ bool disabled;
+
+ struct acc_vf_data vf_data;
+ size_t total_length;
+};
+
+struct hisi_acc_vf_core_device {
+ struct vfio_pci_core_device core_device;
+ u8 deferred_reset:1;
+ /* For migration state */
+ struct mutex state_mutex;
+ enum vfio_device_mig_state mig_state;
+ struct pci_dev *pf_dev;
+ struct pci_dev *vf_dev;
+ struct hisi_qm *pf_qm;
+ struct hisi_qm vf_qm;
+ u32 vf_qm_state;
+ int vf_id;
+ /* For reset handler */
+ spinlock_t reset_lock;
+ struct hisi_acc_vf_migration_file *resuming_migf;
+ struct hisi_acc_vf_migration_file *saving_migf;
+};
+#endif /* HISI_ACC_VFIO_PCI_H */
diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig
new file mode 100644
index 000000000..29ba9c504
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MLX5_VFIO_PCI
+ tristate "VFIO support for MLX5 PCI devices"
+ depends on MLX5_CORE
+ depends on VFIO_PCI_CORE
+ help
+ This provides migration support for MLX5 devices using the VFIO
+ framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/mlx5/Makefile b/drivers/vfio/pci/mlx5/Makefile
new file mode 100644
index 000000000..689627da7
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5-vfio-pci.o
+mlx5-vfio-pci-y := main.o cmd.o
+
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
new file mode 100644
index 000000000..c604b7043
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -0,0 +1,1362 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include "cmd.h"
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+ u16 *vhca_id);
+static void
+_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
+
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
+{
+ u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
+
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
+ return -ENOTCONN;
+
+ MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
+ MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
+ MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);
+
+ return mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
+}
+
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
+{
+ u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};
+
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
+ return -ENOTCONN;
+
+ MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
+ MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
+ MLX5_SET(resume_vhca_in, in, op_mod, op_mod);
+
+ return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
+}
+
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
+ size_t *state_size)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
+ int ret;
+
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
+ return -ENOTCONN;
+
+ MLX5_SET(query_vhca_migration_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
+ MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
+ MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);
+
+ ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
+ out);
+ if (ret)
+ return ret;
+
+ *state_size = MLX5_GET(query_vhca_migration_state_out, out,
+ required_umem_size);
+ return 0;
+}
+
+static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
+{
+ /* Mark the tracker under an error and wake it up if it's running */
+ mvdev->tracker.is_err = true;
+ complete(&mvdev->tracker_comp);
+}
+
+static int mlx5fv_vf_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5vf_pci_core_device *mvdev =
+ container_of(nb, struct mlx5vf_pci_core_device, nb);
+
+ switch (event) {
+ case MLX5_PF_NOTIFY_ENABLE_VF:
+ mutex_lock(&mvdev->state_mutex);
+ mvdev->mdev_detach = false;
+ mlx5vf_state_mutex_unlock(mvdev);
+ break;
+ case MLX5_PF_NOTIFY_DISABLE_VF:
+ mlx5vf_cmd_close_migratable(mvdev);
+ mutex_lock(&mvdev->state_mutex);
+ mvdev->mdev_detach = true;
+ mlx5vf_state_mutex_unlock(mvdev);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->migrate_cap)
+ return;
+
+ /* Must be done outside the lock to let it progress */
+ set_tracker_error(mvdev);
+ mutex_lock(&mvdev->state_mutex);
+ mlx5vf_disable_fds(mvdev);
+ _mlx5vf_free_page_tracker_resources(mvdev);
+ mlx5vf_state_mutex_unlock(mvdev);
+}
+
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->migrate_cap)
+ return;
+
+ mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
+ &mvdev->nb);
+ destroy_workqueue(mvdev->cb_wq);
+}
+
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops,
+ const struct vfio_log_ops *log_ops)
+{
+ struct pci_dev *pdev = mvdev->core_device.pdev;
+ int ret;
+
+ if (!pdev->is_virtfn)
+ return;
+
+ mvdev->mdev = mlx5_vf_get_core_dev(pdev);
+ if (!mvdev->mdev)
+ return;
+
+ if (!MLX5_CAP_GEN(mvdev->mdev, migration))
+ goto end;
+
+ mvdev->vf_id = pci_iov_vf_id(pdev);
+ if (mvdev->vf_id < 0)
+ goto end;
+
+ if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
+ &mvdev->vhca_id))
+ goto end;
+
+ mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
+ if (!mvdev->cb_wq)
+ goto end;
+
+ mutex_init(&mvdev->state_mutex);
+ spin_lock_init(&mvdev->reset_lock);
+ mvdev->nb.notifier_call = mlx5fv_vf_event;
+ ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
+ &mvdev->nb);
+ if (ret) {
+ destroy_workqueue(mvdev->cb_wq);
+ goto end;
+ }
+
+ mvdev->migrate_cap = 1;
+ mvdev->core_device.vdev.migration_flags =
+ VFIO_MIGRATION_STOP_COPY |
+ VFIO_MIGRATION_P2P;
+ mvdev->core_device.vdev.mig_ops = mig_ops;
+ init_completion(&mvdev->tracker_comp);
+ if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
+ mvdev->core_device.vdev.log_ops = log_ops;
+
+end:
+ mlx5_vf_put_core_dev(mvdev->mdev);
+}
+
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+ u16 *vhca_id)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int ret;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, 1);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
+ HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (ret)
+ goto err_exec;
+
+ *vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+err_exec:
+ kfree(out);
+ return ret;
+}
+
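+/*
+ * Create an MTT-based mkey covering either the migration file's
+ * scatter-gather table (@migf) or the page tracker's receive buffer
+ * (@recv_buf).
+ */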
+static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_vf_migration_file *migf,
+ struct mlx5_vhca_recv_buf *recv_buf,
+ u32 *mkey)
+{
+ size_t npages = migf ? DIV_ROUND_UP(migf->total_length, PAGE_SIZE) :
+ recv_buf->npages;
+ int err = 0, inlen;
+ __be64 *mtt;
+ void *mkc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(*mtt) * round_up(npages, 2);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ DIV_ROUND_UP(npages, 2));
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+
+ if (migf) {
+ struct sg_dma_page_iter dma_iter;
+
+ for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
+ *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
+ } else {
+ int i;
+
+ for (i = 0; i < npages; i++)
+ *mtt++ = cpu_to_be64(recv_buf->dma_addrs[i]);
+ }
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
+ MLX5_SET64(mkc, mkc, len,
+ migf ? migf->total_length : (npages * PAGE_SIZE));
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+ kvfree(in);
+ return err;
+}
+
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
+{
+ struct mlx5vf_async_data *async_data = container_of(_work,
+ struct mlx5vf_async_data, work);
+ struct mlx5_vf_migration_file *migf = container_of(async_data,
+ struct mlx5_vf_migration_file, async_data);
+ struct mlx5_core_dev *mdev = migf->mvdev->mdev;
+
+ mutex_lock(&migf->lock);
+ if (async_data->status) {
+ migf->is_err = true;
+ wake_up_interruptible(&migf->poll_wait);
+ }
+ mutex_unlock(&migf->lock);
+
+ mlx5_core_destroy_mkey(mdev, async_data->mkey);
+ dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+ mlx5_core_dealloc_pd(mdev, async_data->pdn);
+ kvfree(async_data->out);
+ fput(migf->filp);
+}
+
+static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5vf_async_data *async_data = container_of(context,
+ struct mlx5vf_async_data, cb_work);
+ struct mlx5_vf_migration_file *migf = container_of(async_data,
+ struct mlx5_vf_migration_file, async_data);
+
+ if (!status) {
+ WRITE_ONCE(migf->total_length,
+ MLX5_GET(save_vhca_state_out, async_data->out,
+ actual_image_size));
+ wake_up_interruptible(&migf->poll_wait);
+ }
+
+ /*
+ * The error and the cleanup flows can't run from an
+ * interrupt context
+ */
+ async_data->status = status;
+ queue_work(migf->mvdev->cb_wq, &async_data->work);
+}
+
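+/*
+ * Issue an asynchronous SAVE_VHCA_STATE command; mlx5vf_save_callback()
+ * records the actual image size and queues the cleanup work.
+ */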
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
+ struct mlx5_vf_migration_file *migf)
+{
+ u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out);
+ u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
+ struct mlx5vf_async_data *async_data;
+ struct mlx5_core_dev *mdev;
+ u32 pdn, mkey;
+ int err;
+
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
+ return -ENOTCONN;
+
+ mdev = mvdev->mdev;
+ err = mlx5_core_alloc_pd(mdev, &pdn);
+ if (err)
+ return err;
+
+ err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE,
+ 0);
+ if (err)
+ goto err_dma_map;
+
+ err = _create_mkey(mdev, pdn, migf, NULL, &mkey);
+ if (err)
+ goto err_create_mkey;
+
+ MLX5_SET(save_vhca_state_in, in, opcode,
+ MLX5_CMD_OP_SAVE_VHCA_STATE);
+ MLX5_SET(save_vhca_state_in, in, op_mod, 0);
+ MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
+ MLX5_SET(save_vhca_state_in, in, mkey, mkey);
+ MLX5_SET(save_vhca_state_in, in, size, migf->total_length);
+
+ async_data = &migf->async_data;
+ async_data->out = kvzalloc(out_size, GFP_KERNEL);
+ if (!async_data->out) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* no data exists till the callback comes back */
+ migf->total_length = 0;
+ get_file(migf->filp);
+ async_data->mkey = mkey;
+ async_data->pdn = pdn;
+ err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in),
+ async_data->out,
+ out_size, mlx5vf_save_callback,
+ &async_data->cb_work);
+ if (err)
+ goto err_exec;
+
+ return 0;
+
+err_exec:
+ fput(migf->filp);
+ kvfree(async_data->out);
+err_out:
+ mlx5_core_destroy_mkey(mdev, mkey);
+err_create_mkey:
+ dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+err_dma_map:
+ mlx5_core_dealloc_pd(mdev, pdn);
+ return err;
+}
+
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
+ struct mlx5_vf_migration_file *migf)
+{
+ struct mlx5_core_dev *mdev;
+ u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
+ u32 pdn, mkey;
+ int err;
+
+ lockdep_assert_held(&mvdev->state_mutex);
+ if (mvdev->mdev_detach)
+ return -ENOTCONN;
+
+ mutex_lock(&migf->lock);
+ if (!migf->total_length) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ mdev = mvdev->mdev;
+ err = mlx5_core_alloc_pd(mdev, &pdn);
+ if (err)
+ goto end;
+
+ err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
+ if (err)
+ goto err_reg;
+
+ err = _create_mkey(mdev, pdn, migf, NULL, &mkey);
+ if (err)
+ goto err_mkey;
+
+ MLX5_SET(load_vhca_state_in, in, opcode,
+ MLX5_CMD_OP_LOAD_VHCA_STATE);
+ MLX5_SET(load_vhca_state_in, in, op_mod, 0);
+ MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
+ MLX5_SET(load_vhca_state_in, in, mkey, mkey);
+ MLX5_SET(load_vhca_state_in, in, size, migf->total_length);
+
+ err = mlx5_cmd_exec_inout(mdev, load_vhca_state, in, out);
+
+ mlx5_core_destroy_mkey(mdev, mkey);
+err_mkey:
+ dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
+err_reg:
+ mlx5_core_dealloc_pd(mdev, pdn);
+end:
+ mutex_unlock(&migf->lock);
+ return err;
+}
+
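+/*
+ * Reduce the interval tree to at most @req_nodes ranges by repeatedly
+ * merging the two neighbouring ranges with the smallest gap.
+ */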
+static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes)
+{
+ struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
+ unsigned long min_gap;
+ unsigned long curr_gap;
+
+ /* Special shortcut when a single range is required */
+ if (req_nodes == 1) {
+ unsigned long last;
+
+ curr = comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ last = curr->last;
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ if (prev != comb_start)
+ interval_tree_remove(prev, root);
+ }
+ comb_start->last = last;
+ return;
+ }
+
+ /* Combine ranges which have the smallest gap */
+ while (cur_nodes > req_nodes) {
+ prev = NULL;
+ min_gap = ULONG_MAX;
+ curr = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ if (prev) {
+ curr_gap = curr->start - prev->last;
+ if (curr_gap < min_gap) {
+ min_gap = curr_gap;
+ comb_start = prev;
+ comb_end = curr;
+ }
+ }
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ }
+ comb_start->last = comb_end->last;
+ interval_tree_remove(comb_end, root);
+ cur_nodes--;
+ }
+}
+
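+/*
+ * Create the firmware page-tracker object over the requested IOVA ranges.
+ * If more ranges were requested than the device supports they are first
+ * merged by combine_ranges(); the total tracked address space must fit
+ * within the device's min/max limits.
+ */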
+static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
+ struct mlx5vf_pci_core_device *mvdev,
+ struct rb_root_cached *ranges, u32 nnodes)
+{
+ int max_num_range =
+ MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_max_num_range);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ int record_size = MLX5_ST_SZ_BYTES(page_track_range);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ struct interval_tree_node *node = NULL;
+ u64 total_ranges_len = 0;
+ u32 num_ranges = nnodes;
+ u8 log_addr_space_size;
+ void *range_list_ptr;
+ void *obj_context;
+ void *cmd_hdr;
+ int inlen;
+ void *in;
+ int err;
+ int i;
+
+ if (num_ranges > max_num_range) {
+ combine_ranges(ranges, nnodes, max_num_range);
+ num_ranges = max_num_range;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_page_track_obj_in) +
+ record_size * num_ranges;
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(create_page_track_obj_in, in,
+ general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type,
+ MLX5_OBJ_TYPE_PAGE_TRACK);
+ obj_context = MLX5_ADDR_OF(create_page_track_obj_in, in, obj_context);
+ MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id);
+ MLX5_SET(page_track, obj_context, track_type, 1);
+ MLX5_SET(page_track, obj_context, log_page_size,
+ ilog2(tracker->host_qp->tracked_page_size));
+ MLX5_SET(page_track, obj_context, log_msg_size,
+ ilog2(tracker->host_qp->max_msg_size));
+ MLX5_SET(page_track, obj_context, reporting_qpn, tracker->fw_qp->qpn);
+ MLX5_SET(page_track, obj_context, num_ranges, num_ranges);
+
+ range_list_ptr = MLX5_ADDR_OF(page_track, obj_context, track_range);
+ node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
+ for (i = 0; i < num_ranges; i++) {
+ void *addr_range_i_base = range_list_ptr + record_size * i;
+ unsigned long length = node->last - node->start;
+
+ MLX5_SET64(page_track_range, addr_range_i_base, start_address,
+ node->start);
+ MLX5_SET64(page_track_range, addr_range_i_base, length, length);
+ total_ranges_len += length;
+ node = interval_tree_iter_next(node, 0, ULONG_MAX);
+ }
+
+ WARN_ON(node);
+ log_addr_space_size = ilog2(total_ranges_len);
+ if (log_addr_space_size <
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_min_addr_space)) ||
+ log_addr_space_size >
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_addr_space))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ MLX5_SET(page_track, obj_context, log_addr_space_size,
+ log_addr_space_size);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ tracker->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ kfree(in);
+ return err;
+}
+
+static int mlx5vf_cmd_destroy_tracker(struct mlx5_core_dev *mdev,
+ u32 tracker_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, tracker_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5vf_cmd_modify_tracker(struct mlx5_core_dev *mdev,
+ u32 tracker_id, unsigned long iova,
+ unsigned long length, u32 tracker_state)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_page_track_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+
+ cmd_hdr = MLX5_ADDR_OF(modify_page_track_obj_in, in, general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, tracker_id);
+
+ obj_context = MLX5_ADDR_OF(modify_page_track_obj_in, in, obj_context);
+ MLX5_SET64(page_track, obj_context, modify_field_select, 0x3);
+ MLX5_SET64(page_track, obj_context, range_start_address, iova);
+ MLX5_SET64(page_track, obj_context, length, length);
+ MLX5_SET(page_track, obj_context, state, tracker_state);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_cq_buf *buf, int nent,
+ int cqe_size)
+{
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+ u8 log_wq_sz = ilog2(cqe_size);
+ int err;
+
+ err = mlx5_frag_buf_alloc_node(mdev, nent * cqe_size, frag_buf,
+ mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+ buf->cqe_size = cqe_size;
+ buf->nent = nent;
+ return 0;
+}
+
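+/* Mark every CQE as invalid so stale entries are never seen as completions */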
+static void init_cq_frag_buf(struct mlx5_vhca_cq_buf *buf)
+{
+ struct mlx5_cqe64 *cqe64;
+ void *cqe;
+ int i;
+
+ for (i = 0; i < buf->nent; i++) {
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+}
+
+static void mlx5vf_destroy_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_cq *cq)
+{
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+ mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
+ mlx5_db_free(mdev, &cq->db);
+}
+
+static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
+{
+ if (type != MLX5_EVENT_TYPE_CQ_ERROR)
+ return;
+
+ set_tracker_error(container_of(mcq, struct mlx5vf_pci_core_device,
+ tracker.cq.mcq));
+}
+
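+/*
+ * Async event handler: a WQ error on either the host or firmware QP makes
+ * further dirty-page reports unreliable, so flag the tracker as failed.
+ */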
+static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
+ void *data)
+{
+ struct mlx5_vhca_page_tracker *tracker =
+ mlx5_nb_cof(nb, struct mlx5_vhca_page_tracker, nb);
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ tracker, struct mlx5vf_pci_core_device, tracker);
+ struct mlx5_eqe *eqe = data;
+ u8 event_type = (u8)type;
+ u8 queue_type;
+ int qp_num;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ queue_type = eqe->data.qp_srq.type;
+ if (queue_type != MLX5_EVENT_QUEUE_TYPE_QP)
+ break;
+ qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ if (qp_num != tracker->host_qp->qpn &&
+ qp_num != tracker->fw_qp->qpn)
+ break;
+ set_tracker_error(mvdev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ struct mlx5vf_pci_core_device *mvdev =
+ container_of(mcq, struct mlx5vf_pci_core_device,
+ tracker.cq.mcq);
+
+ complete(&mvdev->tracker_comp);
+}
+
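+/*
+ * Create the CQ on which dirty-page report completions arrive; each
+ * completion wakes the reader through tracker_comp (see mlx5vf_cq_complete()).
+ */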
+static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_page_tracker *tracker,
+ size_t ncqe)
+{
+ int cqe_size = cache_line_size() == 128 ? 128 : 64;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_vhca_cq *cq;
+ int inlen, err, eqn;
+ void *cqc, *in;
+ __be64 *pas;
+ int vector;
+
+ cq = &tracker->cq;
+ ncqe = roundup_pow_of_two(ncqe);
+ err = mlx5_db_alloc_node(mdev, &cq->db, mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ cq->ncqe = ncqe;
+ cq->mcq.set_ci_db = cq->db.db;
+ cq->mcq.arm_db = cq->db.db + 1;
+ cq->mcq.cqe_sz = cqe_size;
+ err = alloc_cq_frag_buf(mdev, &cq->buf, ncqe, cqe_size);
+ if (err)
+ goto err_db_free;
+
+ init_cq_frag_buf(&cq->buf);
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+ cq->buf.frag_buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_buff;
+ }
+
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ err = mlx5_vector2eqn(mdev, vector, &eqn);
+ if (err)
+ goto err_vec;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, tracker->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->buf.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
+ cq->mcq.comp = mlx5vf_cq_complete;
+ cq->mcq.event = mlx5vf_cq_event;
+ err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
+ if (err)
+ goto err_vec;
+
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
+ cq->mcq.cons_index);
+ kvfree(in);
+ return 0;
+
+err_vec:
+ kvfree(in);
+err_buff:
+ mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
+err_db_free:
+ mlx5_db_free(mdev, &cq->db);
+ return err;
+}
+
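+/*
+ * Create an RC QP with a receive queue only (no send queue). With
+ * @max_recv_wr == 0 this creates the firmware-side QP, which uses a
+ * zero-length RQ.
+ */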
+static struct mlx5_vhca_qp *
+mlx5vf_create_rc_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_page_tracker *tracker, u32 max_recv_wr)
+{
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct mlx5_vhca_qp *qp;
+ u8 log_rq_stride;
+ u8 log_rq_sz;
+ void *qpc;
+ int inlen;
+ void *in;
+ int err;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->rq.wqe_cnt = roundup_pow_of_two(max_recv_wr);
+ log_rq_stride = ilog2(MLX5_SEND_WQE_DS);
+ log_rq_sz = ilog2(qp->rq.wqe_cnt);
+ err = mlx5_db_alloc_node(mdev, &qp->db, mdev->priv.numa_node);
+ if (err)
+ goto err_free;
+
+ if (max_recv_wr) {
+ err = mlx5_frag_buf_alloc_node(mdev,
+ wq_get_byte_sz(log_rq_sz, log_rq_stride),
+ &qp->buf, mdev->priv.numa_node);
+ if (err)
+ goto err_db_free;
+ mlx5_init_fbc(qp->buf.frags, log_rq_stride, log_rq_sz, &qp->rq.fbc);
+ }
+
+ qp->rq.db = &qp->db.db[MLX5_RCV_DBR];
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+ qp->buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, tracker->pdn);
+ MLX5_SET(qpc, qpc, uar_page, tracker->uar->index);
+ MLX5_SET(qpc, qpc, log_page_size,
+ qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
+ if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+ MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ if (max_recv_wr) {
+ MLX5_SET(qpc, qpc, cqn_rcv, tracker->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, log_rq_stride, log_rq_stride - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, log_rq_sz);
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+ mlx5_fill_page_frag_array(&qp->buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in,
+ in, pas));
+ } else {
+ MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+ }
+
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ kvfree(in);
+ if (err)
+ goto err_in;
+
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
+ return qp;
+
+err_in:
+ if (max_recv_wr)
+ mlx5_frag_buf_free(mdev, &qp->buf);
+err_db_free:
+ mlx5_db_free(mdev, &qp->db);
+err_free:
+ kfree(qp);
+ return ERR_PTR(err);
+}
+
+static void mlx5vf_post_recv(struct mlx5_vhca_qp *qp)
+{
+ struct mlx5_wqe_data_seg *data;
+ unsigned int ix;
+
+ WARN_ON(qp->rq.pc - qp->rq.cc >= qp->rq.wqe_cnt);
+ ix = qp->rq.pc & (qp->rq.wqe_cnt - 1);
+ data = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ix);
+ data->byte_count = cpu_to_be32(qp->max_msg_size);
+ data->lkey = cpu_to_be32(qp->recv_buf.mkey);
+ data->addr = cpu_to_be64(qp->recv_buf.next_rq_offset);
+ qp->rq.pc++;
+ /* Make sure that descriptors are written before doorbell record. */
+ dma_wmb();
+ *qp->rq.db = cpu_to_be32(qp->rq.pc & 0xffff);
+}
+
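+/*
+ * Move the QP from RESET through INIT to RTR, pre-posting the whole receive
+ * queue for the host QP on the way. Only the firmware QP, which sends the
+ * reports, is taken all the way to RTS.
+ */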
+static int mlx5vf_activate_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp, u32 remote_qpn,
+ bool host_qp)
+{
+ u32 init_in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ u32 rtr_in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+ u32 rts_in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+ void *qpc;
+ int ret;
+
+ /* Init */
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, init_in, qpc);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+ MLX5_SET(qpc, qpc, rre, 1);
+ MLX5_SET(qpc, qpc, rwe, 1);
+ MLX5_SET(rst2init_qp_in, init_in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, init_in, qpn, qp->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rst2init_qp, init_in);
+ if (ret)
+ return ret;
+
+ if (host_qp) {
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+ int i;
+
+ for (i = 0; i < qp->rq.wqe_cnt; i++) {
+ mlx5vf_post_recv(qp);
+ recv_buf->next_rq_offset += qp->max_msg_size;
+ }
+ }
+
+ /* RTR */
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, rtr_in, qpc);
+ MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn);
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, MLX5_CAP_GEN(mdev, log_max_msg));
+ MLX5_SET(qpc, qpc, remote_qpn, remote_qpn);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 1);
+ MLX5_SET(init2rtr_qp_in, rtr_in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn);
+ ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, rtr_in);
+ if (ret || host_qp)
+ return ret;
+
+ /* RTS */
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, rts_in, qpc);
+ MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn);
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
+ MLX5_SET(rtr2rts_qp_in, rts_in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, rts_in);
+}
+
+static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+
+ mlx5_frag_buf_free(mdev, &qp->buf);
+ mlx5_db_free(mdev, &qp->db);
+ kfree(qp);
+}
+
+static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i;
+
+ /* Undo alloc_pages_bulk_array() */
+ for (i = 0; i < recv_buf->npages; i++)
+ __free_page(recv_buf->page_list[i]);
+
+ kvfree(recv_buf->page_list);
+}
+
+static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
+ unsigned int npages)
+{
+ unsigned int filled = 0, done = 0;
+ int i;
+
+ recv_buf->page_list = kvcalloc(npages, sizeof(*recv_buf->page_list),
+ GFP_KERNEL);
+ if (!recv_buf->page_list)
+ return -ENOMEM;
+
+ for (;;) {
+ filled = alloc_pages_bulk_array(GFP_KERNEL, npages - done,
+ recv_buf->page_list + done);
+ if (!filled)
+ goto err;
+
+ done += filled;
+ if (done == npages)
+ break;
+ }
+
+ recv_buf->npages = npages;
+ return 0;
+
+err:
+ for (i = 0; i < npages; i++) {
+ if (recv_buf->page_list[i])
+ __free_page(recv_buf->page_list[i]);
+ }
+
+ kvfree(recv_buf->page_list);
+ return -ENOMEM;
+}
+
+static int register_dma_recv_pages(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i, j;
+
+ recv_buf->dma_addrs = kvcalloc(recv_buf->npages,
+ sizeof(*recv_buf->dma_addrs),
+ GFP_KERNEL);
+ if (!recv_buf->dma_addrs)
+ return -ENOMEM;
+
+ for (i = 0; i < recv_buf->npages; i++) {
+ recv_buf->dma_addrs[i] = dma_map_page(mdev->device,
+ recv_buf->page_list[i],
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mdev->device, recv_buf->dma_addrs[i]))
+ goto error;
+ }
+ return 0;
+
+error:
+ for (j = 0; j < i; j++)
+ dma_unmap_single(mdev->device, recv_buf->dma_addrs[j],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ kvfree(recv_buf->dma_addrs);
+ return -ENOMEM;
+}
+
+static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i;
+
+ for (i = 0; i < recv_buf->npages; i++)
+ dma_unmap_single(mdev->device, recv_buf->dma_addrs[i],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ kvfree(recv_buf->dma_addrs);
+}
+
+static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp)
+{
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+
+ mlx5_core_destroy_mkey(mdev, recv_buf->mkey);
+ unregister_dma_recv_pages(mdev, recv_buf);
+ free_recv_pages(&qp->recv_buf);
+}
+
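+/*
+ * Allocate @rq_size bytes of receive buffer for the host QP, DMA map the
+ * pages and cover them with an mkey that is used as the lkey of the posted
+ * receive WQEs.
+ */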
+static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp, u32 pdn,
+ u64 rq_size)
+{
+ unsigned int npages = DIV_ROUND_UP_ULL(rq_size, PAGE_SIZE);
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+ int err;
+
+ err = alloc_recv_pages(recv_buf, npages);
+ if (err < 0)
+ return err;
+
+ err = register_dma_recv_pages(mdev, recv_buf);
+ if (err)
+ goto end;
+
+ err = _create_mkey(mdev, pdn, NULL, recv_buf, &recv_buf->mkey);
+ if (err)
+ goto err_create_mkey;
+
+ return 0;
+
+err_create_mkey:
+ unregister_dma_recv_pages(mdev, recv_buf);
+end:
+ free_recv_pages(recv_buf);
+ return err;
+}
+
+static void
+_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ struct mlx5_core_dev *mdev = mvdev->mdev;
+
+ lockdep_assert_held(&mvdev->state_mutex);
+
+ if (!mvdev->log_active)
+ return;
+
+ WARN_ON(mvdev->mdev_detach);
+
+ mlx5_eq_notifier_unregister(mdev, &tracker->nb);
+ mlx5vf_cmd_destroy_tracker(mdev, tracker->id);
+ mlx5vf_destroy_qp(mdev, tracker->fw_qp);
+ mlx5vf_free_qp_recv_resources(mdev, tracker->host_qp);
+ mlx5vf_destroy_qp(mdev, tracker->host_qp);
+ mlx5vf_destroy_cq(mdev, &tracker->cq);
+ mlx5_core_dealloc_pd(mdev, tracker->pdn);
+ mlx5_put_uars_page(mdev, tracker->uar);
+ mvdev->log_active = false;
+}
+
+int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mutex_lock(&mvdev->state_mutex);
+ if (!mvdev->log_active)
+ goto end;
+
+ _mlx5vf_free_page_tracker_resources(mvdev);
+ mvdev->log_active = false;
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+}
+
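+/*
+ * vfio .log_start callback: allocate the UAR, PD, CQ and both RC QPs, then
+ * create the firmware tracker object over @ranges. The tracked page size is
+ * clamped to the device limits and reported back through @page_size.
+ */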
+int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes,
+ u64 *page_size)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ u8 log_tracked_page = ilog2(*page_size);
+ struct mlx5_vhca_qp *host_qp;
+ struct mlx5_vhca_qp *fw_qp;
+ struct mlx5_core_dev *mdev;
+ u32 max_msg_size = PAGE_SIZE;
+ u64 rq_size = SZ_2M;
+ u32 max_recv_wr;
+ int err;
+
+ mutex_lock(&mvdev->state_mutex);
+ if (mvdev->mdev_detach) {
+ err = -ENOTCONN;
+ goto end;
+ }
+
+ if (mvdev->log_active) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ mdev = mvdev->mdev;
+ memset(tracker, 0, sizeof(*tracker));
+ tracker->uar = mlx5_get_uars_page(mdev);
+ if (IS_ERR(tracker->uar)) {
+ err = PTR_ERR(tracker->uar);
+ goto end;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &tracker->pdn);
+ if (err)
+ goto err_uar;
+
+ max_recv_wr = DIV_ROUND_UP_ULL(rq_size, max_msg_size);
+ err = mlx5vf_create_cq(mdev, tracker, max_recv_wr);
+ if (err)
+ goto err_dealloc_pd;
+
+ host_qp = mlx5vf_create_rc_qp(mdev, tracker, max_recv_wr);
+ if (IS_ERR(host_qp)) {
+ err = PTR_ERR(host_qp);
+ goto err_cq;
+ }
+
+ host_qp->max_msg_size = max_msg_size;
+ if (log_tracked_page < MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_min_page_size)) {
+ log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_min_page_size);
+ } else if (log_tracked_page > MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_max_page_size)) {
+ log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_max_page_size);
+ }
+
+ host_qp->tracked_page_size = (1ULL << log_tracked_page);
+ err = mlx5vf_alloc_qp_recv_resources(mdev, host_qp, tracker->pdn,
+ rq_size);
+ if (err)
+ goto err_host_qp;
+
+ fw_qp = mlx5vf_create_rc_qp(mdev, tracker, 0);
+ if (IS_ERR(fw_qp)) {
+ err = PTR_ERR(fw_qp);
+ goto err_recv_resources;
+ }
+
+ err = mlx5vf_activate_qp(mdev, host_qp, fw_qp->qpn, true);
+ if (err)
+ goto err_activate;
+
+ err = mlx5vf_activate_qp(mdev, fw_qp, host_qp->qpn, false);
+ if (err)
+ goto err_activate;
+
+ tracker->host_qp = host_qp;
+ tracker->fw_qp = fw_qp;
+ err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
+ if (err)
+ goto err_activate;
+
+ MLX5_NB_INIT(&tracker->nb, mlx5vf_event_notifier, NOTIFY_ANY);
+ mlx5_eq_notifier_register(mdev, &tracker->nb);
+ *page_size = host_qp->tracked_page_size;
+ mvdev->log_active = true;
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+
+err_activate:
+ mlx5vf_destroy_qp(mdev, fw_qp);
+err_recv_resources:
+ mlx5vf_free_qp_recv_resources(mdev, host_qp);
+err_host_qp:
+ mlx5vf_destroy_qp(mdev, host_qp);
+err_cq:
+ mlx5vf_destroy_cq(mdev, &tracker->cq);
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, tracker->pdn);
+err_uar:
+ mlx5_put_uars_page(mdev, tracker->uar);
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return err;
+}
+
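+/*
+ * Parse one report message: each entry holds a dirty IOVA split into
+ * low/high 32-bit words, which is marked in the caller's bitmap at the
+ * tracked page size granularity.
+ */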
+static void
+set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp,
+ struct iova_bitmap *dirty)
+{
+ u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry);
+ u32 nent = size / entry_size;
+ struct page *page;
+ u64 addr;
+ u64 *buf;
+ int i;
+
+ if (WARN_ON(index >= qp->recv_buf.npages ||
+ (nent > qp->max_msg_size / entry_size)))
+ return;
+
+ page = qp->recv_buf.page_list[index];
+ buf = kmap_local_page(page);
+ for (i = 0; i < nent; i++) {
+ addr = MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_low);
+ addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_high) << 32;
+ iova_bitmap_set(dirty, addr, qp->tracked_page_size);
+ }
+ kunmap_local(buf);
+}
+
+static void
+mlx5vf_rq_cqe(struct mlx5_vhca_qp *qp, struct mlx5_cqe64 *cqe,
+ struct iova_bitmap *dirty, int *tracker_status)
+{
+ u32 size;
+ int ix;
+
+ qp->rq.cc++;
+ *tracker_status = be32_to_cpu(cqe->immediate) >> 28;
+ size = be32_to_cpu(cqe->byte_cnt);
+ ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1);
+
+ /* a zero-length CQE carries no report data; unexpected while still reporting */
+ WARN_ON(!size && *tracker_status == MLX5_PAGE_TRACK_STATE_REPORTING);
+ if (size)
+ set_report_output(size, ix, qp, dirty);
+
+ qp->recv_buf.next_rq_offset = ix * qp->max_msg_size;
+ mlx5vf_post_recv(qp);
+}
+
+static void *get_cqe(struct mlx5_vhca_cq *cq, int n)
+{
+ return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
+}
+
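+/*
+ * Return the next CQE owned by software: its opcode must be valid and its
+ * ownership bit must match the current pass over the CQ ring.
+ */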
+static struct mlx5_cqe64 *get_sw_cqe(struct mlx5_vhca_cq *cq, int n)
+{
+ void *cqe = get_cqe(cq, n & (cq->ncqe - 1));
+ struct mlx5_cqe64 *cqe64;
+
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ncqe)))) {
+ return cqe64;
+ } else {
+ return NULL;
+ }
+}
+
+static int
+mlx5vf_cq_poll_one(struct mlx5_vhca_cq *cq, struct mlx5_vhca_qp *qp,
+ struct iova_bitmap *dirty, int *tracker_status)
+{
+ struct mlx5_cqe64 *cqe;
+ u8 opcode;
+
+ cqe = get_sw_cqe(cq, cq->mcq.cons_index);
+ if (!cqe)
+ return CQ_EMPTY;
+
+ ++cq->mcq.cons_index;
+ /*
+ * Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rmb();
+ opcode = get_cqe_opcode(cqe);
+ switch (opcode) {
+ case MLX5_CQE_RESP_SEND_IMM:
+ mlx5vf_rq_cqe(qp, cqe, dirty, tracker_status);
+ return CQ_OK;
+ default:
+ return CQ_POLL_ERR;
+ }
+}
+
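+/*
+ * vfio .log_read_and_clear callback: ask firmware to report dirty pages for
+ * the given range and drain completions until the tracker leaves the
+ * REPORTING state, re-arming the CQ and sleeping on tracker_comp whenever it
+ * runs dry.
+ */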
+int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length,
+ struct iova_bitmap *dirty)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ struct mlx5_vhca_cq *cq = &tracker->cq;
+ struct mlx5_core_dev *mdev;
+ int poll_err, err;
+
+ mutex_lock(&mvdev->state_mutex);
+ if (!mvdev->log_active) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (mvdev->mdev_detach) {
+ err = -ENOTCONN;
+ goto end;
+ }
+
+ mdev = mvdev->mdev;
+ err = mlx5vf_cmd_modify_tracker(mdev, tracker->id, iova, length,
+ MLX5_PAGE_TRACK_STATE_REPORTING);
+ if (err)
+ goto end;
+
+ tracker->status = MLX5_PAGE_TRACK_STATE_REPORTING;
+ while (tracker->status == MLX5_PAGE_TRACK_STATE_REPORTING &&
+ !tracker->is_err) {
+ poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp, dirty,
+ &tracker->status);
+ if (poll_err == CQ_EMPTY) {
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
+ cq->mcq.cons_index);
+ poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp,
+ dirty, &tracker->status);
+ if (poll_err == CQ_EMPTY) {
+ wait_for_completion(&mvdev->tracker_comp);
+ continue;
+ }
+ }
+ if (poll_err == CQ_POLL_ERR) {
+ err = -EIO;
+ goto end;
+ }
+ mlx5_cq_set_ci(&cq->mcq);
+ }
+
+ if (tracker->status == MLX5_PAGE_TRACK_STATE_ERROR)
+ tracker->is_err = true;
+
+ if (tracker->is_err)
+ err = -EIO;
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return err;
+}
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
new file mode 100644
index 000000000..921d5720a
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef MLX5_VFIO_CMD_H
+#define MLX5_VFIO_CMD_H
+
+#include <linux/kernel.h>
+#include <linux/vfio_pci_core.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
+
+struct mlx5vf_async_data {
+ struct mlx5_async_work cb_work;
+ struct work_struct work;
+ int status;
+ u32 pdn;
+ u32 mkey;
+ void *out;
+};
+
+struct mlx5_vf_migration_file {
+ struct file *filp;
+ struct mutex lock;
+ u8 disabled:1;
+ u8 is_err:1;
+
+ struct sg_append_table table;
+ size_t total_length;
+ size_t allocated_length;
+
+ /* Optimize mlx5vf_get_migration_page() for sequential access */
+ struct scatterlist *last_offset_sg;
+ unsigned int sg_last_entry;
+ unsigned long last_offset;
+ struct mlx5vf_pci_core_device *mvdev;
+ wait_queue_head_t poll_wait;
+ struct mlx5_async_ctx async_ctx;
+ struct mlx5vf_async_data async_data;
+};
+
+struct mlx5_vhca_cq_buf {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ int cqe_size;
+ int nent;
+};
+
+struct mlx5_vhca_cq {
+ struct mlx5_vhca_cq_buf buf;
+ struct mlx5_db db;
+ struct mlx5_core_cq mcq;
+ size_t ncqe;
+};
+
+struct mlx5_vhca_recv_buf {
+ u32 npages;
+ struct page **page_list;
+ dma_addr_t *dma_addrs;
+ u32 next_rq_offset;
+ u32 mkey;
+};
+
+struct mlx5_vhca_qp {
+ struct mlx5_frag_buf buf;
+ struct mlx5_db db;
+ struct mlx5_vhca_recv_buf recv_buf;
+ u32 tracked_page_size;
+ u32 max_msg_size;
+ u32 qpn;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int wqe_cnt;
+ __be32 *db;
+ struct mlx5_frag_buf_ctrl fbc;
+ } rq;
+};
+
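+/*
+ * Dirty page tracking state: the firmware object id, the PD/UAR backing the
+ * CQ and QPs, the host QP that receives report messages and the firmware QP
+ * that sends them.
+ */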
+struct mlx5_vhca_page_tracker {
+ u32 id;
+ u32 pdn;
+ u8 is_err:1;
+ struct mlx5_uars_page *uar;
+ struct mlx5_vhca_cq cq;
+ struct mlx5_vhca_qp *host_qp;
+ struct mlx5_vhca_qp *fw_qp;
+ struct mlx5_nb nb;
+ int status;
+};
+
+struct mlx5vf_pci_core_device {
+ struct vfio_pci_core_device core_device;
+ int vf_id;
+ u16 vhca_id;
+ u8 migrate_cap:1;
+ u8 deferred_reset:1;
+ u8 mdev_detach:1;
+ u8 log_active:1;
+ struct completion tracker_comp;
+ /* protect migration state */
+ struct mutex state_mutex;
+ enum vfio_device_mig_state mig_state;
+ /* protect the reset_done flow */
+ spinlock_t reset_lock;
+ struct mlx5_vf_migration_file *resuming_migf;
+ struct mlx5_vf_migration_file *saving_migf;
+ struct mlx5_vhca_page_tracker tracker;
+ struct workqueue_struct *cb_wq;
+ struct notifier_block nb;
+ struct mlx5_core_dev *mdev;
+};
+
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
+ size_t *state_size);
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops,
+ const struct vfio_log_ops *log_ops);
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
+ struct mlx5_vf_migration_file *migf);
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
+ struct mlx5_vf_migration_file *migf);
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
+int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
+int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
+int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length, struct iova_bitmap *dirty);
+#endif /* MLX5_VFIO_CMD_H */
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
new file mode 100644
index 000000000..fd6ccb845
--- /dev/null
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/sched/mm.h>
+#include <linux/anon_inodes.h>
+
+#include "cmd.h"
+
+/* Arbitrary to prevent userspace from consuming endless memory */
+#define MAX_MIGRATION_SIZE (512*1024*1024)
+
+static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ return container_of(core_device, struct mlx5vf_pci_core_device,
+ core_device);
+}
+
+static struct page *
+mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf,
+ unsigned long offset)
+{
+ unsigned long cur_offset = 0;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ /* All accesses are sequential */
+ if (offset < migf->last_offset || !migf->last_offset_sg) {
+ migf->last_offset = 0;
+ migf->last_offset_sg = migf->table.sgt.sgl;
+ migf->sg_last_entry = 0;
+ }
+
+ cur_offset = migf->last_offset;
+
+ for_each_sg(migf->last_offset_sg, sg,
+ migf->table.sgt.orig_nents - migf->sg_last_entry, i) {
+ if (offset < sg->length + cur_offset) {
+ migf->last_offset_sg = sg;
+ migf->sg_last_entry += i;
+ migf->last_offset = cur_offset;
+ return nth_page(sg_page(sg),
+ (offset - cur_offset) / PAGE_SIZE);
+ }
+ cur_offset += sg->length;
+ }
+ return NULL;
+}
+
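+/*
+ * Grow the migration data buffer by @npages, allocating pages in bulk and
+ * appending them to the file's scatter-gather table.
+ */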
+static int mlx5vf_add_migration_pages(struct mlx5_vf_migration_file *migf,
+ unsigned int npages)
+{
+ unsigned int to_alloc = npages;
+ struct page **page_list;
+ unsigned long filled;
+ unsigned int to_fill;
+ int ret;
+
+ to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
+ page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ do {
+ filled = alloc_pages_bulk_array(GFP_KERNEL, to_fill, page_list);
+ if (!filled) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ to_alloc -= filled;
+ ret = sg_alloc_append_table_from_pages(
+ &migf->table, page_list, filled, 0,
+ filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
+ GFP_KERNEL);
+
+ if (ret)
+ goto err;
+ migf->allocated_length += filled * PAGE_SIZE;
+ /* clean input for another bulk allocation */
+ memset(page_list, 0, filled * sizeof(*page_list));
+ to_fill = min_t(unsigned int, to_alloc,
+ PAGE_SIZE / sizeof(*page_list));
+ } while (to_alloc > 0);
+
+ kvfree(page_list);
+ return 0;
+
+err:
+ kvfree(page_list);
+ return ret;
+}
+
+static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
+{
+ struct sg_page_iter sg_iter;
+
+ mutex_lock(&migf->lock);
+ /* Undo alloc_pages_bulk_array() */
+ for_each_sgtable_page(&migf->table.sgt, &sg_iter, 0)
+ __free_page(sg_page_iter_page(&sg_iter));
+ sg_free_append_table(&migf->table);
+ migf->disabled = true;
+ migf->total_length = 0;
+ migf->allocated_length = 0;
+ migf->filp->f_pos = 0;
+ mutex_unlock(&migf->lock);
+}
+
+static int mlx5vf_release_file(struct inode *inode, struct file *filp)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+
+ mlx5vf_disable_fd(migf);
+ mutex_destroy(&migf->lock);
+ kfree(migf);
+ return 0;
+}
+
+static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
+ loff_t *pos)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+ ssize_t done = 0;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ if (wait_event_interruptible(migf->poll_wait,
+ READ_ONCE(migf->total_length) || migf->is_err))
+ return -ERESTARTSYS;
+ }
+
+ mutex_lock(&migf->lock);
+ if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) {
+ done = -EAGAIN;
+ goto out_unlock;
+ }
+ if (*pos > migf->total_length) {
+ done = -EINVAL;
+ goto out_unlock;
+ }
+ if (migf->disabled || migf->is_err) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ len = min_t(size_t, migf->total_length - *pos, len);
+ while (len) {
+ size_t page_offset;
+ struct page *page;
+ size_t page_len;
+ u8 *from_buff;
+ int ret;
+
+ page_offset = (*pos) % PAGE_SIZE;
+ page = mlx5vf_get_migration_page(migf, *pos - page_offset);
+ if (!page) {
+ if (done == 0)
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
+ from_buff = kmap_local_page(page);
+ ret = copy_to_user(buf, from_buff + page_offset, page_len);
+ kunmap_local(from_buff);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += page_len;
+ len -= page_len;
+ done += page_len;
+ buf += page_len;
+ }
+
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static __poll_t mlx5vf_save_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+ __poll_t pollflags = 0;
+
+ poll_wait(filp, &migf->poll_wait, wait);
+
+ mutex_lock(&migf->lock);
+ if (migf->disabled || migf->is_err)
+ pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ else if (READ_ONCE(migf->total_length))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ mutex_unlock(&migf->lock);
+
+ return pollflags;
+}
+
+static const struct file_operations mlx5vf_save_fops = {
+ .owner = THIS_MODULE,
+ .read = mlx5vf_save_read,
+ .poll = mlx5vf_save_poll,
+ .release = mlx5vf_release_file,
+ .llseek = no_llseek,
+};
+
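+/*
+ * Create the STOP_COPY file: query how much state the device has, allocate
+ * that many pages up front and trigger the asynchronous SAVE_VHCA_STATE
+ * command. Data becomes readable once the async callback completes.
+ */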
+static struct mlx5_vf_migration_file *
+mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5_vf_migration_file *migf;
+ int ret;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_save_fops, migf,
+ O_RDONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+ init_waitqueue_head(&migf->poll_wait);
+ mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
+ INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
+ ret = mlx5vf_cmd_query_vhca_migration_state(mvdev,
+ &migf->total_length);
+ if (ret)
+ goto out_free;
+
+ ret = mlx5vf_add_migration_pages(
+ migf, DIV_ROUND_UP_ULL(migf->total_length, PAGE_SIZE));
+ if (ret)
+ goto out_free;
+
+ migf->mvdev = mvdev;
+ ret = mlx5vf_cmd_save_vhca_state(mvdev, migf);
+ if (ret)
+ goto out_free;
+ return migf;
+out_free:
+ fput(migf->filp);
+ return ERR_PTR(ret);
+}
+
+static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct mlx5_vf_migration_file *migf = filp->private_data;
+ loff_t requested_length;
+ ssize_t done = 0;
+
+ if (pos)
+ return -ESPIPE;
+ pos = &filp->f_pos;
+
+ if (*pos < 0 ||
+ check_add_overflow((loff_t)len, *pos, &requested_length))
+ return -EINVAL;
+
+ if (requested_length > MAX_MIGRATION_SIZE)
+ return -ENOMEM;
+
+ mutex_lock(&migf->lock);
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (migf->allocated_length < requested_length) {
+ done = mlx5vf_add_migration_pages(
+ migf,
+ DIV_ROUND_UP(requested_length - migf->allocated_length,
+ PAGE_SIZE));
+ if (done)
+ goto out_unlock;
+ }
+
+ while (len) {
+ size_t page_offset;
+ struct page *page;
+ size_t page_len;
+ u8 *to_buff;
+ int ret;
+
+ page_offset = (*pos) % PAGE_SIZE;
+ page = mlx5vf_get_migration_page(migf, *pos - page_offset);
+ if (!page) {
+ if (done == 0)
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
+ to_buff = kmap_local_page(page);
+ ret = copy_from_user(to_buff + page_offset, buf, page_len);
+ kunmap_local(to_buff);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *pos += page_len;
+ len -= page_len;
+ done += page_len;
+ buf += page_len;
+ migf->total_length += page_len;
+ }
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations mlx5vf_resume_fops = {
+ .owner = THIS_MODULE,
+ .write = mlx5vf_resume_write,
+ .release = mlx5vf_release_file,
+ .llseek = no_llseek,
+};
+
+static struct mlx5_vf_migration_file *
+mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5_vf_migration_file *migf;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_resume_fops, migf,
+ O_WRONLY);
+ if (IS_ERR(migf->filp)) {
+ int err = PTR_ERR(migf->filp);
+
+ kfree(migf);
+ return ERR_PTR(err);
+ }
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+ return migf;
+}
+
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (mvdev->resuming_migf) {
+ mlx5vf_disable_fd(mvdev->resuming_migf);
+ fput(mvdev->resuming_migf->filp);
+ mvdev->resuming_migf = NULL;
+ }
+ if (mvdev->saving_migf) {
+ mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
+ cancel_work_sync(&mvdev->saving_migf->async_data.work);
+ mlx5vf_disable_fd(mvdev->saving_migf);
+ fput(mvdev->saving_migf->filp);
+ mvdev->saving_migf = NULL;
+ }
+}
+
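+/*
+ * Execute a single arc of the VFIO device migration state machine. Arcs to
+ * and from RUNNING_P2P map to firmware suspend/resume of the VHCA, while the
+ * STOP_COPY and RESUMING arcs create and tear down the save/resume migration
+ * files.
+ */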
+static struct file *
+mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
+ u32 new)
+{
+ u32 cur = mvdev->mig_state;
+ int ret;
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
+ ret = mlx5vf_cmd_suspend_vhca(mvdev,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
+ ret = mlx5vf_cmd_resume_vhca(mvdev,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
+ ret = mlx5vf_cmd_suspend_vhca(mvdev,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
+ ret = mlx5vf_cmd_resume_vhca(mvdev,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
+ struct mlx5_vf_migration_file *migf;
+
+ migf = mlx5vf_pci_save_device_data(mvdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ mvdev->saving_migf = migf;
+ return migf->filp;
+ }
+
+ if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
+ mlx5vf_disable_fds(mvdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
+ struct mlx5_vf_migration_file *migf;
+
+ migf = mlx5vf_pci_resume_device_data(mvdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ mvdev->resuming_migf = migf;
+ return migf->filp;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = mlx5vf_cmd_load_vhca_state(mvdev,
+ mvdev->resuming_migf);
+ if (ret)
+ return ERR_PTR(ret);
+ mlx5vf_disable_fds(mvdev);
+ return NULL;
+ }
+
+ /*
+ * vfio_mig_get_next_state() does not use arcs other than the above
+ */
+ WARN_ON(true);
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * This function is called in all state_mutex unlock cases to
+ * handle a 'deferred_reset' if exists.
+ */
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
+{
+again:
+ spin_lock(&mvdev->reset_lock);
+ if (mvdev->deferred_reset) {
+ mvdev->deferred_reset = false;
+ spin_unlock(&mvdev->reset_lock);
+ mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ mlx5vf_disable_fds(mvdev);
+ goto again;
+ }
+ mutex_unlock(&mvdev->state_mutex);
+ spin_unlock(&mvdev->reset_lock);
+}
+
+static struct file *
+mlx5vf_pci_set_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state new_state)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ enum vfio_device_mig_state next_state;
+ struct file *res = NULL;
+ int ret;
+
+ mutex_lock(&mvdev->state_mutex);
+ while (new_state != mvdev->mig_state) {
+ ret = vfio_mig_get_next_state(vdev, mvdev->mig_state,
+ new_state, &next_state);
+ if (ret) {
+ res = ERR_PTR(ret);
+ break;
+ }
+ res = mlx5vf_pci_step_device_state_locked(mvdev, next_state);
+ if (IS_ERR(res))
+ break;
+ mvdev->mig_state = next_state;
+ if (WARN_ON(res && new_state != mvdev->mig_state)) {
+ fput(res);
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+ mlx5vf_state_mutex_unlock(mvdev);
+ return res;
+}
+
+static int mlx5vf_pci_get_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state *curr_state)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mutex_lock(&mvdev->state_mutex);
+ *curr_state = mvdev->mig_state;
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+}
+
+static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
+
+ if (!mvdev->migrate_cap)
+ return;
+
+ /*
+ * As the higher VFIO layers are holding locks across reset and using
+ * those same locks with the mm_lock we need to prevent ABBA deadlock
+ * with the state_mutex and mm_lock.
+ * In case the state_mutex was taken already we defer the cleanup work
+ * to the unlock flow of the other running context.
+ */
+ spin_lock(&mvdev->reset_lock);
+ mvdev->deferred_reset = true;
+ if (!mutex_trylock(&mvdev->state_mutex)) {
+ spin_unlock(&mvdev->reset_lock);
+ return;
+ }
+ spin_unlock(&mvdev->reset_lock);
+ mlx5vf_state_mutex_unlock(mvdev);
+}
+
+static int mlx5vf_pci_open_device(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct vfio_pci_core_device *vdev = &mvdev->core_device;
+ int ret;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ if (mvdev->migrate_cap)
+ mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ vfio_pci_core_finish_enable(vdev);
+ return 0;
+}
+
+static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mlx5vf_cmd_close_migratable(mvdev);
+ vfio_pci_core_close_device(core_vdev);
+}
+
+static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
+ .migration_set_state = mlx5vf_pci_set_device_state,
+ .migration_get_state = mlx5vf_pci_get_device_state,
+};
+
+static const struct vfio_log_ops mlx5vf_pci_log_ops = {
+ .log_start = mlx5vf_start_page_tracker,
+ .log_stop = mlx5vf_stop_page_tracker,
+ .log_read_and_clear = mlx5vf_tracker_read_and_clear,
+};
+
+static int mlx5vf_pci_init_dev(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
+ struct mlx5vf_pci_core_device, core_device.vdev);
+ int ret;
+
+ ret = vfio_pci_core_init_dev(core_vdev);
+ if (ret)
+ return ret;
+
+ mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops,
+ &mlx5vf_pci_log_ops);
+
+ return 0;
+}
+
+static void mlx5vf_pci_release_dev(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
+ struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mlx5vf_cmd_remove_migratable(mvdev);
+ vfio_pci_core_release_dev(core_vdev);
+}
+
+static const struct vfio_device_ops mlx5vf_pci_ops = {
+ .name = "mlx5-vfio-pci",
+ .init = mlx5vf_pci_init_dev,
+ .release = mlx5vf_pci_release_dev,
+ .open_device = mlx5vf_pci_open_device,
+ .close_device = mlx5vf_pci_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+};
+
+static int mlx5vf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct mlx5vf_pci_core_device *mvdev;
+ int ret;
+
+ mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev,
+ &pdev->dev, &mlx5vf_pci_ops);
+ if (IS_ERR(mvdev))
+ return PTR_ERR(mvdev);
+
+ dev_set_drvdata(&pdev->dev, &mvdev->core_device);
+ ret = vfio_pci_core_register_device(&mvdev->core_device);
+ if (ret)
+ goto out_put_vdev;
+ return 0;
+
+out_put_vdev:
+ vfio_put_device(&mvdev->core_device.vdev);
+ return ret;
+}
+
+static void mlx5vf_pci_remove(struct pci_dev *pdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
+
+ vfio_pci_core_unregister_device(&mvdev->core_device);
+ vfio_put_device(&mvdev->core_device.vdev);
+}
+
+static const struct pci_device_id mlx5vf_pci_table[] = {
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_MELLANOX, 0x101e) }, /* ConnectX Family mlx5Gen Virtual Function */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, mlx5vf_pci_table);
+
+static const struct pci_error_handlers mlx5vf_err_handlers = {
+ .reset_done = mlx5vf_pci_aer_reset_done,
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver mlx5vf_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mlx5vf_pci_table,
+ .probe = mlx5vf_pci_probe,
+ .remove = mlx5vf_pci_remove,
+ .err_handler = &mlx5vf_err_handlers,
+ .driver_managed_dma = true,
+};
+
+static void __exit mlx5vf_pci_cleanup(void)
+{
+ pci_unregister_driver(&mlx5vf_pci_driver);
+}
+
+static int __init mlx5vf_pci_init(void)
+{
+ return pci_register_driver(&mlx5vf_pci_driver);
+}
+
+module_init(mlx5vf_pci_init);
+module_exit(mlx5vf_pci_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Max Gurtovoy <mgurtovoy@nvidia.com>");
+MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
+MODULE_DESCRIPTION(
+ "MLX5 VFIO PCI - User Level meta-driver for MLX5 device family");
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
new file mode 100644
index 000000000..b2aa986ab
--- /dev/null
+++ b/drivers/vfio/pci/trace.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * VFIO PCI mmap/mmap_fault tracepoints
+ *
+ * Copyright (C) 2018 IBM Corp. All rights reserved.
+ * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_pci
+
+#if !defined(_TRACE_VFIO_PCI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VFIO_PCI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_pci_nvgpu_mmap_fault,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ vm_fault_t ret),
+ TP_ARGS(pdev, hpa, ua, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev),
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->ret)
+);
+
+TRACE_EVENT(vfio_pci_nvgpu_mmap,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ unsigned long size, int ret),
+ TP_ARGS(pdev, hpa, ua, size, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(unsigned long, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev),
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->size = size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->size, __entry->ret)
+);
+
+TRACE_EVENT(vfio_pci_npu2_mmap,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ unsigned long size, int ret),
+ TP_ARGS(pdev, hpa, ua, size, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(unsigned long, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev),
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->size = size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->size, __entry->ret)
+);
+
+#endif /* _TRACE_VFIO_PCI_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
new file mode 100644
index 000000000..1d4919edf
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "vfio_pci_priv.h"
+
+#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
+
+static char ids[1024] __initdata;
+module_param_string(ids, ids, sizeof(ids), 0);
+MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
+
+static bool nointxmask;
+module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nointxmask,
+ "Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
+
+#ifdef CONFIG_VFIO_PCI_VGA
+static bool disable_vga;
+module_param(disable_vga, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
+#endif
+
+static bool disable_idle_d3;
+module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_idle_d3,
+ "Disable using the PCI D3 low power state for idle, unused devices");
+
+static bool enable_sriov;
+#ifdef CONFIG_PCI_IOV
+module_param(enable_sriov, bool, 0644);
+MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver, enabling VFs without such support may result in non-functional VFs or PF.");
+#endif
+
+static bool disable_denylist;
+module_param(disable_denylist, bool, 0444);
+MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users.");
+
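+/*
+ * Devices with known errata that make userspace assignment unsafe unless the
+ * denylist is explicitly disabled (see disable_denylist above); currently the
+ * Intel QAT families.
+ */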
+static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
+{
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static bool vfio_pci_is_denylisted(struct pci_dev *pdev)
+{
+ if (!vfio_pci_dev_in_denylist(pdev))
+ return false;
+
+ if (disable_denylist) {
+ pci_warn(pdev,
+ "device denylist disabled - allowing device %04x:%04x.\n",
+ pdev->vendor, pdev->device);
+ return false;
+ }
+
+ pci_warn(pdev, "%04x:%04x exists in vfio-pci device denylist, driver probing disallowed.\n",
+ pdev->vendor, pdev->device);
+
+ return true;
+}
+
+static int vfio_pci_open_device(struct vfio_device *core_vdev)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ if (vfio_pci_is_vga(pdev) &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
+ ret = vfio_pci_igd_init(vdev);
+ if (ret && ret != -ENODEV) {
+ pci_warn(pdev, "Failed to setup Intel IGD regions\n");
+ vfio_pci_core_disable(vdev);
+ return ret;
+ }
+ }
+
+ vfio_pci_core_finish_enable(vdev);
+
+ return 0;
+}
+
+static const struct vfio_device_ops vfio_pci_ops = {
+ .name = "vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
+ .open_device = vfio_pci_open_device,
+ .close_device = vfio_pci_core_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+};
+
+static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct vfio_pci_core_device *vdev;
+ int ret;
+
+ if (vfio_pci_is_denylisted(pdev))
+ return -EINVAL;
+
+ vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev,
+ &vfio_pci_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ dev_set_drvdata(&pdev->dev, vdev);
+ ret = vfio_pci_core_register_device(vdev);
+ if (ret)
+ goto out_put_vdev;
+ return 0;
+
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
+ return ret;
+}
+
+static void vfio_pci_remove(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
+
+ vfio_pci_core_unregister_device(vdev);
+ vfio_put_device(&vdev->vdev);
+}
+
+static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
+
+ if (!enable_sriov)
+ return -ENOENT;
+
+ return vfio_pci_core_sriov_configure(vdev, nr_virtfn);
+}
+
+static const struct pci_device_id vfio_pci_table[] = {
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_ANY_ID, PCI_ANY_ID) }, /* match all by default */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, vfio_pci_table);
+
+static struct pci_driver vfio_pci_driver = {
+ .name = "vfio-pci",
+ .id_table = vfio_pci_table,
+ .probe = vfio_pci_probe,
+ .remove = vfio_pci_remove,
+ .sriov_configure = vfio_pci_sriov_configure,
+ .err_handler = &vfio_pci_core_err_handlers,
+ .driver_managed_dma = true,
+};
+
+static void __init vfio_pci_fill_ids(void)
+{
+ char *p, *id;
+ int rc;
+
+ /* no IDs were passed via the module parameter */
+ if (ids[0] == '\0')
+ return;
+
+ /* add ids specified in the module parameter */
+ p = ids;
+ while ((id = strsep(&p, ","))) {
+ unsigned int vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ int fields;
+
+ if (!strlen(id))
+ continue;
+
+ fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask);
+
+ if (fields < 2) {
+ pr_warn("invalid id string \"%s\"\n", id);
+ continue;
+ }
+
+ rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
+ subvendor, subdevice, class, class_mask, 0);
+ if (rc)
+ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
+ vendor, device, subvendor, subdevice,
+ class, class_mask, rc);
+ else
+ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
+ vendor, device, subvendor, subdevice,
+ class, class_mask);
+ }
+}
+
+static int __init vfio_pci_init(void)
+{
+ int ret;
+ bool is_disable_vga = true;
+
+#ifdef CONFIG_VFIO_PCI_VGA
+ is_disable_vga = disable_vga;
+#endif
+
+ vfio_pci_core_set_params(nointxmask, is_disable_vga, disable_idle_d3);
+
+ /* Register and scan for devices */
+ ret = pci_register_driver(&vfio_pci_driver);
+ if (ret)
+ return ret;
+
+ vfio_pci_fill_ids();
+
+ if (disable_denylist)
+ pr_warn("device denylist disabled.\n");
+
+ return 0;
+}
+module_init(vfio_pci_init);
+
+static void __exit vfio_pci_cleanup(void)
+{
+ pci_unregister_driver(&vfio_pci_driver);
+}
+module_exit(vfio_pci_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
new file mode 100644
index 000000000..4a350421c
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -0,0 +1,1961 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO PCI config space virtualization
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+/*
+ * This code handles reading and writing of PCI configuration registers.
+ * This is hairy because we want to allow a lot of flexibility to the
+ * user driver, but cannot trust it with all of the config fields.
+ * Tables determine which fields can be read and written, as well as
+ * which fields are 'virtualized' - special actions and translations to
+ * make it appear to the user that he has control, when in fact things
+ * must be negotiated with the underlying OS.
+ */
+
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/slab.h>
+
+#include "vfio_pci_priv.h"
+
+/* Fake capability ID for standard config space */
+#define PCI_CAP_ID_BASIC 0
+
+#define is_bar(offset) \
+ ((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
+ (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4))
+
+/*
+ * Lengths of PCI Config Capabilities
+ * 0: Removed from the user visible capability list
+ * FF: Variable length
+ */
+static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
+ [PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */
+ [PCI_CAP_ID_PM] = PCI_PM_SIZEOF,
+ [PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF,
+ [PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF,
+ [PCI_CAP_ID_SLOTID] = 0, /* bridge - don't care */
+ [PCI_CAP_ID_MSI] = 0xFF, /* 10, 14, 20, or 24 */
+ [PCI_CAP_ID_CHSWP] = 0, /* cpci - not yet */
+ [PCI_CAP_ID_PCIX] = 0xFF, /* 8 or 24 */
+ [PCI_CAP_ID_HT] = 0xFF, /* hypertransport */
+ [PCI_CAP_ID_VNDR] = 0xFF, /* variable */
+ [PCI_CAP_ID_DBG] = 0, /* debug - don't care */
+ [PCI_CAP_ID_CCRC] = 0, /* cpci - not yet */
+ [PCI_CAP_ID_SHPC] = 0, /* hotswap - not yet */
+ [PCI_CAP_ID_SSVID] = 0, /* bridge - don't care */
+ [PCI_CAP_ID_AGP3] = 0, /* AGP8x - not yet */
+ [PCI_CAP_ID_SECDEV] = 0, /* secure device not yet */
+ [PCI_CAP_ID_EXP] = 0xFF, /* 20 or 44 */
+ [PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF,
+ [PCI_CAP_ID_SATA] = 0xFF,
+ [PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF,
+};
+
+/*
+ * Lengths of PCIe/PCI-X Extended Config Capabilities
+ * 0: Removed or masked from the user visible capability list
+ * FF: Variable length
+ */
+static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
+ [PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND,
+ [PCI_EXT_CAP_ID_VC] = 0xFF,
+ [PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF,
+ [PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF,
+ [PCI_EXT_CAP_ID_RCLD] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_RCILC] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_RCEC] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_MFVC] = 0xFF,
+ [PCI_EXT_CAP_ID_VC9] = 0xFF, /* same as CAP_ID_VC */
+ [PCI_EXT_CAP_ID_RCRB] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_VNDR] = 0xFF,
+ [PCI_EXT_CAP_ID_CAC] = 0, /* obsolete */
+ [PCI_EXT_CAP_ID_ACS] = 0xFF,
+ [PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF,
+ [PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF,
+ [PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF,
+ [PCI_EXT_CAP_ID_MRIOV] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF,
+ [PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF,
+ [PCI_EXT_CAP_ID_AMD_XXX] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_REBAR] = 0xFF,
+ [PCI_EXT_CAP_ID_DPA] = 0xFF,
+ [PCI_EXT_CAP_ID_TPH] = 0xFF,
+ [PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF,
+ [PCI_EXT_CAP_ID_SECPCI] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_PMUX] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_PASID] = 0, /* not yet */
+};
+
+/*
+ * Read/Write Permission Bits - one bit for each bit in capability
+ * Any field can be read if it exists, but what is read depends on
+ * whether the field is 'virtualized', or just passed through to the
+ * hardware. Any virtualized field is also virtualized for writes.
+ * Writes are only permitted if they have a 1 bit here.
+ */
+struct perm_bits {
+ u8 *virt; /* read/write virtual data, not hw */
+ u8 *write; /* writeable bits */
+ int (*readfn)(struct vfio_pci_core_device *vdev, int pos, int count,
+ struct perm_bits *perm, int offset, __le32 *val);
+ int (*writefn)(struct vfio_pci_core_device *vdev, int pos, int count,
+ struct perm_bits *perm, int offset, __le32 val);
+};
+
+#define NO_VIRT 0
+#define ALL_VIRT 0xFFFFFFFFU
+#define NO_WRITE 0
+#define ALL_WRITE 0xFFFFFFFFU
+
+static int vfio_user_config_read(struct pci_dev *pdev, int offset,
+ __le32 *val, int count)
+{
+ int ret = -EINVAL;
+ u32 tmp_val = 0;
+
+ switch (count) {
+ case 1:
+ {
+ u8 tmp;
+ ret = pci_user_read_config_byte(pdev, offset, &tmp);
+ tmp_val = tmp;
+ break;
+ }
+ case 2:
+ {
+ u16 tmp;
+ ret = pci_user_read_config_word(pdev, offset, &tmp);
+ tmp_val = tmp;
+ break;
+ }
+ case 4:
+ ret = pci_user_read_config_dword(pdev, offset, &tmp_val);
+ break;
+ }
+
+ *val = cpu_to_le32(tmp_val);
+
+ return ret;
+}
+
+static int vfio_user_config_write(struct pci_dev *pdev, int offset,
+ __le32 val, int count)
+{
+ int ret = -EINVAL;
+ u32 tmp_val = le32_to_cpu(val);
+
+ switch (count) {
+ case 1:
+ ret = pci_user_write_config_byte(pdev, offset, tmp_val);
+ break;
+ case 2:
+ ret = pci_user_write_config_word(pdev, offset, tmp_val);
+ break;
+ case 4:
+ ret = pci_user_write_config_dword(pdev, offset, tmp_val);
+ break;
+ }
+
+ return ret;
+}
+
+static int vfio_default_config_read(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ __le32 virt = 0;
+
+ memcpy(val, vdev->vconfig + pos, count);
+
+ memcpy(&virt, perm->virt + offset, count);
+
+ /* Any non-virtualized bits? */
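+ /* ~0U >> (32 - count * 8) builds a mask covering every byte of this access */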
+ if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) {
+ struct pci_dev *pdev = vdev->pdev;
+ __le32 phys_val = 0;
+ int ret;
+
+ ret = vfio_user_config_read(pdev, pos, &phys_val, count);
+ if (ret)
+ return ret;
+
+ *val = (phys_val & ~virt) | (*val & virt);
+ }
+
+ return count;
+}
+
+static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ __le32 virt = 0, write = 0;
+
+ memcpy(&write, perm->write + offset, count);
+
+ if (!write)
+ return count; /* drop, no writable bits */
+
+ memcpy(&virt, perm->virt + offset, count);
+
+ /* Virtualized and writable bits go to vconfig */
+ if (write & virt) {
+ __le32 virt_val = 0;
+
+ memcpy(&virt_val, vdev->vconfig + pos, count);
+
+ virt_val &= ~(write & virt);
+ virt_val |= (val & (write & virt));
+
+ memcpy(vdev->vconfig + pos, &virt_val, count);
+ }
+
+ /* Non-virtualized and writable bits go to hardware */
+ if (write & ~virt) {
+ struct pci_dev *pdev = vdev->pdev;
+ __le32 phys_val = 0;
+ int ret;
+
+ ret = vfio_user_config_read(pdev, pos, &phys_val, count);
+ if (ret)
+ return ret;
+
+ phys_val &= ~(write & ~virt);
+ phys_val |= (val & (write & ~virt));
+
+ ret = vfio_user_config_write(pdev, pos, phys_val, count);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+
+/* Allow direct read from hardware, except for capability next pointer */
+static int vfio_direct_config_read(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ int ret;
+
+ ret = vfio_user_config_read(vdev->pdev, pos, val, count);
+ if (ret)
+ return ret;
+
+ if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
+ if (offset < 4)
+ memcpy(val, vdev->vconfig + pos, count);
+ } else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */
+ if (offset == PCI_CAP_LIST_ID && count > 1)
+ memcpy(val, vdev->vconfig + pos,
+ min(PCI_CAP_FLAGS, count));
+ else if (offset == PCI_CAP_LIST_NEXT)
+ memcpy(val, vdev->vconfig + pos, 1);
+ }
+
+ return count;
+}
+
+/* Raw access skips any kind of virtualization */
+static int vfio_raw_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ int ret;
+
+ ret = vfio_user_config_write(vdev->pdev, pos, val, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int vfio_raw_config_read(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ int ret;
+
+ ret = vfio_user_config_read(vdev->pdev, pos, val, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/* Virt access uses only virtualization */
+static int vfio_virt_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ memcpy(vdev->vconfig + pos, &val, count);
+ return count;
+}
+
+static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ memcpy(val, vdev->vconfig + pos, count);
+ return count;
+}
+
+/* Default capability regions to read-only, no-virtualization */
+static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
+ [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
+};
+static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
+ [0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
+};
+/*
+ * Default unassigned regions to raw read-write access. Some devices
+ * require this to function as they hide registers between the gaps in
+ * config space (be2net). Like MMIO and I/O port registers, we have
+ * to trust the hardware isolation.
+ */
+static struct perm_bits unassigned_perms = {
+ .readfn = vfio_raw_config_read,
+ .writefn = vfio_raw_config_write
+};
+
+static struct perm_bits virt_perms = {
+ .readfn = vfio_virt_config_read,
+ .writefn = vfio_virt_config_write
+};
+
+static void free_perm_bits(struct perm_bits *perm)
+{
+ kfree(perm->virt);
+ kfree(perm->write);
+ perm->virt = NULL;
+ perm->write = NULL;
+}
+
+static int alloc_perm_bits(struct perm_bits *perm, int size)
+{
+ /*
+ * Round up all permission bits to the next dword, this lets us
+ * ignore whether a read/write exceeds the defined capability
+ * structure. We can do this because:
+ * - Standard config space is already dword aligned
+ * - Capabilities are all dword aligned (bits 0:1 of next reserved)
+ * - Express capabilities defined as dword aligned
+ */
+ size = round_up(size, 4);
+
+ /*
+ * Zero state is
+ * - All Readable, None Writeable, None Virtualized
+ */
+ perm->virt = kzalloc(size, GFP_KERNEL);
+ perm->write = kzalloc(size, GFP_KERNEL);
+ if (!perm->virt || !perm->write) {
+ free_perm_bits(perm);
+ return -ENOMEM;
+ }
+
+ perm->readfn = vfio_default_config_read;
+ perm->writefn = vfio_default_config_write;
+
+ return 0;
+}
+
+/*
+ * Helper functions for filling in permission tables
+ */
+static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
+{
+ p->virt[off] = virt;
+ p->write[off] = write;
+}
+
+/* Handle endian-ness - pci and tables are little-endian */
+static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
+{
+ *(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
+ *(__le16 *)(&p->write[off]) = cpu_to_le16(write);
+}
+
+/* Handle endian-ness - pci and tables are little-endian */
+static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
+{
+ *(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
+ *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
+}
+
+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+ /*
+ * The memory region cannot be accessed if the device power state is D3.
+ *
+ * SR-IOV VF memory enable is handled by the MSE bit in the
+ * PF SR-IOV capability, there's therefore no need to trigger
+ * faults based on the virtual value.
+ */
+ return pdev->current_state < PCI_D3hot &&
+ (pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY));
+}
+
+/*
+ * Restore the *real* BARs after we detect a FLR or backdoor reset.
+ * (backdoor = some device specific technique that we didn't catch)
+ */
+static void vfio_bar_restore(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u32 *rbar = vdev->rbar;
+ u16 cmd;
+ int i;
+
+ if (pdev->is_virtfn)
+ return;
+
+ pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__);
+
+ for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
+ pci_user_write_config_dword(pdev, i, *rbar);
+
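+ /* After the loop, rbar points at the saved ROM BAR value (rbar[6]) */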
+ pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);
+
+ if (vdev->nointx) {
+ pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_INTX_DISABLE;
+ pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+}
+
+static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
+{
+ unsigned long flags = pci_resource_flags(pdev, bar);
+ u32 val;
+
+ if (flags & IORESOURCE_IO)
+ return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO);
+
+ val = PCI_BASE_ADDRESS_SPACE_MEMORY;
+
+ if (flags & IORESOURCE_PREFETCH)
+ val |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+ if (flags & IORESOURCE_MEM_64)
+ val |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ return cpu_to_le32(val);
+}
+
+/*
+ * Pretend we're hardware and tweak the values of the *virtual* PCI BARs
+ * to reflect the hardware capabilities. This implements BAR sizing.
+ */
+static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int i;
+ __le32 *vbar;
+ u64 mask;
+
+ if (!vdev->bardirty)
+ return;
+
+ vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ if (!pci_resource_start(pdev, bar)) {
+ *vbar = 0; /* Unmapped by host = unimplemented to user */
+ continue;
+ }
+
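+ /* BAR sizing: bits below the resource size always read back as zero */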
+ mask = ~(pci_resource_len(pdev, bar) - 1);
+
+ *vbar &= cpu_to_le32((u32)mask);
+ *vbar |= vfio_generate_bar_flags(pdev, bar);
+
+ if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ vbar++;
+ *vbar &= cpu_to_le32((u32)(mask >> 32));
+ i++;
+ }
+ }
+
+ vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
+
+ /*
+ * NB. REGION_INFO will have reported zero size if we weren't able
+ * to read the ROM, but we still return the actual BAR size here if
+ * it exists (or the shadow ROM space).
+ */
+ if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
+ mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
+ mask |= PCI_ROM_ADDRESS_ENABLE;
+ *vbar &= cpu_to_le32((u32)mask);
+ } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW) {
+ mask = ~(0x20000 - 1);
+ mask |= PCI_ROM_ADDRESS_ENABLE;
+ *vbar &= cpu_to_le32((u32)mask);
+ } else
+ *vbar = 0;
+
+ vdev->bardirty = false;
+}
+
+static int vfio_basic_config_read(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ if (is_bar(offset)) /* pos == offset for basic config */
+ vfio_bar_fixup(vdev);
+
+ count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
+
+ /* Mask in virtual memory enable */
+ if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+ u32 tmp_val = le32_to_cpu(*val);
+
+ tmp_val |= cmd & PCI_COMMAND_MEMORY;
+ *val = cpu_to_le32(tmp_val);
+ }
+
+ return count;
+}
+
+/* Test whether BARs match the value we think they should contain */
+static bool vfio_need_bar_restore(struct vfio_pci_core_device *vdev)
+{
+ int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
+ u32 bar;
+
+ for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) {
+ if (vdev->rbar[i]) {
+ ret = pci_user_read_config_dword(vdev->pdev, pos, &bar);
+ if (ret || vdev->rbar[i] != bar)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int vfio_basic_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ __le16 *virt_cmd;
+ u16 new_cmd = 0;
+ int ret;
+
+ virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND];
+
+ if (offset == PCI_COMMAND) {
+ bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io;
+ u16 phys_cmd;
+
+ ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd);
+ if (ret)
+ return ret;
+
+ new_cmd = le32_to_cpu(val);
+
+ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+ new_io = !!(new_cmd & PCI_COMMAND_IO);
+
+ phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
+ virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
+ new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
+
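+ /*
+ * Disabling memory zaps any mmaps of the device so that user
+ * accesses fault rather than reach disabled MMIO.
+ */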
+ if (!new_mem)
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ else
+ down_write(&vdev->memory_lock);
+
+ /*
+ * If the user is writing mem/io enable (new_mem/io) and we
+ * think it's already enabled (virt_mem/io), but the hardware
+ * shows it disabled (phys_mem/io), then the device has
+ * undergone some kind of backdoor reset and needs to be
+ * restored before we allow it to enable the BARs.
+ * SR-IOV devices will trigger this - for mem enable let's
+ * catch it now; for io enable it will be caught later.
+ */
+ if ((new_mem && virt_mem && !phys_mem &&
+ !pdev->no_command_memory) ||
+ (new_io && virt_io && !phys_io) ||
+ vfio_need_bar_restore(vdev))
+ vfio_bar_restore(vdev);
+ }
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0) {
+ if (offset == PCI_COMMAND)
+ up_write(&vdev->memory_lock);
+ return count;
+ }
+
+ /*
+ * Save current memory/io enable bits in vconfig to allow for
+ * the test above next time.
+ */
+ if (offset == PCI_COMMAND) {
+ u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+
+ *virt_cmd &= cpu_to_le16(~mask);
+ *virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+ up_write(&vdev->memory_lock);
+ }
+
+ /* Emulate INTx disable */
+ if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) {
+ bool virt_intx_disable;
+
+ virt_intx_disable = !!(le16_to_cpu(*virt_cmd) &
+ PCI_COMMAND_INTX_DISABLE);
+
+ if (virt_intx_disable && !vdev->virq_disabled) {
+ vdev->virq_disabled = true;
+ vfio_pci_intx_mask(vdev);
+ } else if (!virt_intx_disable && vdev->virq_disabled) {
+ vdev->virq_disabled = false;
+ vfio_pci_intx_unmask(vdev);
+ }
+ }
+
+ if (is_bar(offset))
+ vdev->bardirty = true;
+
+ return count;
+}
+
+/* Permissions for the Basic PCI Header */
+static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF))
+ return -ENOMEM;
+
+ perm->readfn = vfio_basic_config_read;
+ perm->writefn = vfio_basic_config_write;
+
+ /* Virtualized for SR-IOV functions, which just have FFFF */
+ p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE);
+ p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE);
+
+ /*
+ * Virtualize INTx disable; we use it internally for interrupt
+ * control and can emulate it for non-PCI 2.3 devices.
+ */
+ p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);
+
+ /* Virtualize capability list, we might want to skip/disable */
+ p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE);
+
+ /* No harm to write */
+ p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE);
+ p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE);
+ p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE);
+
+ /* Virtualize all bars, can't touch the real ones */
+ p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE);
+
+ /* Allow us to adjust capability chain */
+ p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE);
+
+ /* Sometimes used by sw, just virtualize */
+ p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);
+
+ /* Virtualize interrupt pin to allow hiding INTx */
+ p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);
+
+ return 0;
+}
+
+/*
+ * Take all the locks required to protect access to power-related
+ * variables, then invoke vfio_pci_set_power_state().
+ */
+static void vfio_lock_and_set_power_state(struct vfio_pci_core_device *vdev,
+ pci_power_t state)
+{
+ if (state >= PCI_D3hot)
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ else
+ down_write(&vdev->memory_lock);
+
+ vfio_pci_set_power_state(vdev, state);
+ up_write(&vdev->memory_lock);
+}
+
+static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ if (offset == PCI_PM_CTRL) {
+ pci_power_t state;
+
+ switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
+ case 0:
+ state = PCI_D0;
+ break;
+ case 1:
+ state = PCI_D1;
+ break;
+ case 2:
+ state = PCI_D2;
+ break;
+ case 3:
+ state = PCI_D3hot;
+ break;
+ }
+
+ vfio_lock_and_set_power_state(vdev, state);
+ }
+
+ return count;
+}
+
+/* Permissions for the Power Management capability */
+static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
+ return -ENOMEM;
+
+ perm->writefn = vfio_pm_config_write;
+
+ /*
+ * We always virtualize the next field so we can remove
+ * capabilities from the chain if we want to.
+ */
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ /*
+ * The guest can't process PME events. If a PME event is generated,
+ * it will mostly be handled by the host, and the host will clear
+ * PME_STATUS. So virtualize the PME_Support bits.
+ * The vconfig bits will be cleared during device capability
+ * initialization.
+ */
+ p_setw(perm, PCI_PM_PMC, PCI_PM_CAP_PME_MASK, NO_WRITE);
+
+ /*
+ * Power management is defined *per function*, so we can let
+ * the user change power state, but we trap and initiate the
+ * change ourselves, so the state bits are read-only.
+ *
+ * The guest can't process PME from D3cold so virtualize PME_Status
+ * and PME_En bits. The vconfig bits will be cleared during device
+ * capability initialization.
+ */
+ p_setd(perm, PCI_PM_CTRL,
+ PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS,
+ ~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS |
+ PCI_PM_CTRL_STATE_MASK));
+
+ return 0;
+}
+
+static int vfio_vpd_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ __le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR);
+ __le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA);
+ u16 addr;
+ u32 data;
+
+ /*
+ * Write through to emulation. If the write includes the upper byte
+ * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we
+ * have work to do.
+ */
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0 || offset > PCI_VPD_ADDR + 1 ||
+ offset + count <= PCI_VPD_ADDR + 1)
+ return count;
+
+ addr = le16_to_cpu(*paddr);
+
+ if (addr & PCI_VPD_ADDR_F) {
+ data = le32_to_cpu(*pdata);
+ if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
+ return count;
+ } else {
+ data = 0;
+ if (pci_read_vpd(pdev, addr, 4, &data) < 0)
+ return count;
+ *pdata = cpu_to_le32(data);
+ }
+
+ /*
+ * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to
+ * signal completion. If an error occurs above, we assume that not
+ * toggling this bit will induce a driver timeout.
+ */
+ addr ^= PCI_VPD_ADDR_F;
+ *paddr = cpu_to_le16(addr);
+
+ return count;
+}
+
+/* Permissions for Vital Product Data capability */
+static int __init init_pci_cap_vpd_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD]))
+ return -ENOMEM;
+
+ perm->writefn = vfio_vpd_config_write;
+
+ /*
+ * We always virtualize the next field so we can remove
+ * capabilities from the chain if we want to.
+ */
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ /*
+ * Both the address and data registers are virtualized to
+ * enable access through the pci_vpd_read/write functions.
+ */
+ p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE);
+ p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE);
+
+ return 0;
+}
+
+/* Permissions for PCI-X capability */
+static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
+{
+ /* Alloc 24, but only 8 are used in v0 */
+ if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2))
+ return -ENOMEM;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE);
+ p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE);
+ return 0;
+}
+
+static int vfio_exp_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
+ offset + PCI_EXP_DEVCTL);
+ int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized; if it is set and the device supports
+ * PCIe FLR, issue a reset_function. Regardless, clear the bit - the
+ * spec requires it to always read as zero. NB, reset_function might
+ * not use a PCIe FLR; we don't have that level of granularity.
+ */
+ if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
+ u32 cap;
+ int ret;
+
+ *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
+
+ ret = pci_user_read_config_dword(vdev->pdev,
+ pos - offset + PCI_EXP_DEVCAP,
+ &cap);
+
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
+ }
+
+ /*
+ * MPS is virtualized to the user, writes do not change the physical
+ * register since determining a proper MPS value requires a system wide
+ * device view. The MRRS is largely independent of MPS, but since the
+ * user does not have that system-wide view, they might set a safe, but
+ * inefficiently low value. Here we allow writes through to hardware,
+ * but we set the floor to the physical device MPS setting, so that
+ * we can at least use full TLPs, as defined by the MPS value.
+ *
+ * NB, if any devices actually depend on an artificially low MRRS
+ * setting, this will need to be revisited, perhaps with a quirk
+ * through pcie_set_readrq().
+ */
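+ /* DEVCTL[14:12] encodes MRRS as 128 << value bytes */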
+ if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+ readrq = 128 <<
+ ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+ readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+ pcie_set_readrq(vdev->pdev, readrq);
+ }
+
+ return count;
+}
+
+/* Permissions for PCI Express capability */
+static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
+{
+ /* Alloc largest of possible sizes */
+ if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
+ return -ENOMEM;
+
+ perm->writefn = vfio_exp_config_write;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ /*
+ * Allow writes to device control fields, except devctl_phantom
+ * (which could confuse the IOMMU), MPS (which can break communication
+ * with other physical devices), and the ARI bit in devctl2 (which
+ * is set at probe time). FLR and MRRS get virtualized via our
+ * writefn.
+ */
+ p_setw(perm, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
+ p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
+ return 0;
+}
+
+static int vfio_af_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized; if it is set and the device supports
+ * AF FLR, issue a reset_function. Regardless, clear the bit - the
+ * spec requires it to always read as zero. NB, reset_function might
+ * not use an AF FLR; we don't have that level of granularity.
+ */
+ if (*ctrl & PCI_AF_CTRL_FLR) {
+ u8 cap;
+ int ret;
+
+ *ctrl &= ~PCI_AF_CTRL_FLR;
+
+ ret = pci_user_read_config_byte(vdev->pdev,
+ pos - offset + PCI_AF_CAP,
+ &cap);
+
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
+ }
+
+ return count;
+}
+
+/* Permissions for Advanced Function capability */
+static int __init init_pci_cap_af_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
+ return -ENOMEM;
+
+ perm->writefn = vfio_af_config_write;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+ p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
+ return 0;
+}
+
+/* Permissions for Advanced Error Reporting extended capability */
+static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
+{
+ u32 mask;
+
+ if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR]))
+ return -ENOMEM;
+
+ /*
+ * Virtualize the first dword of all express capabilities
+ * because it includes the next pointer. This lets us later
+ * remove capabilities from the chain if we need to.
+ */
+ p_setd(perm, 0, ALL_VIRT, NO_WRITE);
+
+ /* Writable bits mask */
+ mask = PCI_ERR_UNC_UND | /* Undefined */
+ PCI_ERR_UNC_DLP | /* Data Link Protocol */
+ PCI_ERR_UNC_SURPDN | /* Surprise Down */
+ PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */
+ PCI_ERR_UNC_FCP | /* Flow Control Protocol */
+ PCI_ERR_UNC_COMP_TIME | /* Completion Timeout */
+ PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */
+ PCI_ERR_UNC_UNX_COMP | /* Unexpected Completion */
+ PCI_ERR_UNC_RX_OVER | /* Receiver Overflow */
+ PCI_ERR_UNC_MALF_TLP | /* Malformed TLP */
+ PCI_ERR_UNC_ECRC | /* ECRC Error Status */
+ PCI_ERR_UNC_UNSUP | /* Unsupported Request */
+ PCI_ERR_UNC_ACSV | /* ACS Violation */
+ PCI_ERR_UNC_INTN | /* internal error */
+ PCI_ERR_UNC_MCBTLP | /* MC blocked TLP */
+ PCI_ERR_UNC_ATOMEG | /* Atomic egress blocked */
+ PCI_ERR_UNC_TLPPRE; /* TLP prefix blocked */
+ p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask);
+ p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask);
+ p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask);
+
+ mask = PCI_ERR_COR_RCVR | /* Receiver Error Status */
+ PCI_ERR_COR_BAD_TLP | /* Bad TLP Status */
+ PCI_ERR_COR_BAD_DLLP | /* Bad DLLP Status */
+ PCI_ERR_COR_REP_ROLL | /* REPLAY_NUM Rollover */
+ PCI_ERR_COR_REP_TIMER | /* Replay Timer Timeout */
+ PCI_ERR_COR_ADV_NFAT | /* Advisory Non-Fatal */
+ PCI_ERR_COR_INTERNAL | /* Corrected Internal */
+ PCI_ERR_COR_LOG_OVER; /* Header Log Overflow */
+ p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask);
+ p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask);
+
+ mask = PCI_ERR_CAP_ECRC_GENE | /* ECRC Generation Enable */
+ PCI_ERR_CAP_ECRC_CHKE; /* ECRC Check Enable */
+ p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask);
+ return 0;
+}
+
+/* Permissions for Power Budgeting extended capability */
+static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR]))
+ return -ENOMEM;
+
+ p_setd(perm, 0, ALL_VIRT, NO_WRITE);
+
+ /* Writing the data selector is OK, the info is still read-only */
+ p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE);
+ return 0;
+}
+
+/*
+ * Initialize the shared permission tables
+ */
+void vfio_pci_uninit_perm_bits(void)
+{
+ free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]);
+
+ free_perm_bits(&cap_perms[PCI_CAP_ID_PM]);
+ free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]);
+ free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]);
+ free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]);
+ free_perm_bits(&cap_perms[PCI_CAP_ID_AF]);
+
+ free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
+ free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
+}
+
+int __init vfio_pci_init_perm_bits(void)
+{
+ int ret;
+
+ /* Basic config space */
+ ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]);
+
+ /* Capabilities */
+ ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
+ ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]);
+ ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
+ cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
+ ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
+ ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
+
+ /* Extended capabilities */
+ ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
+ ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
+ ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
+
+ if (ret)
+ vfio_pci_uninit_perm_bits();
+
+ return ret;
+}
+
+static int vfio_find_cap_start(struct vfio_pci_core_device *vdev, int pos)
+{
+ u8 cap;
+ int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
+ PCI_STD_HEADER_SIZEOF;
+ cap = vdev->pci_config_map[pos];
+
+ if (cap == PCI_CAP_ID_BASIC)
+ return 0;
+
+ /* XXX Can we have two abutting capabilities of the same type? */
+ while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
+ pos--;
+
+ return pos;
+}
+
+static int vfio_msi_config_read(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ /* Update max available queue size from msi_qmax */
+ if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
+ __le16 *flags;
+ int start;
+
+ start = vfio_find_cap_start(vdev, pos);
+
+ flags = (__le16 *)&vdev->vconfig[start];
+
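+ /* Multiple Message Capable occupies bits 3:1 of the MSI flags */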
+ *flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK);
+ *flags |= cpu_to_le16(vdev->msi_qmax << 1);
+ }
+
+ return vfio_default_config_read(vdev, pos, count, perm, offset, val);
+}
+
+static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /* Fixup and write configured queue size and enable to hardware */
+ if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
+ __le16 *pflags;
+ u16 flags;
+ int start, ret;
+
+ start = vfio_find_cap_start(vdev, pos);
+
+ pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];
+
+ flags = le16_to_cpu(*pflags);
+
+ /* MSI is enabled via ioctl */
+ if (vdev->irq_type != VFIO_PCI_MSI_IRQ_INDEX)
+ flags &= ~PCI_MSI_FLAGS_ENABLE;
+
+ /* Clamp the requested queue size (Multiple Message Enable) to msi_qmax */
+ if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) {
+ flags &= ~PCI_MSI_FLAGS_QSIZE;
+ flags |= vdev->msi_qmax << 4;
+ }
+
+ /* Write back to virt and to hardware */
+ *pflags = cpu_to_le16(flags);
+ ret = pci_user_write_config_word(vdev->pdev,
+ start + PCI_MSI_FLAGS,
+ flags);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+
+/*
+ * MSI determination is per-device, so this routine gets used beyond
+ * initialization time. Don't add __init
+ */
+static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
+{
+ if (alloc_perm_bits(perm, len))
+ return -ENOMEM;
+
+ perm->readfn = vfio_msi_config_read;
+ perm->writefn = vfio_msi_config_write;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ /*
+ * The upper byte of the control register is reserved,
+ * just set up the lower byte.
+ */
+ p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE);
+ p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE);
+ if (flags & PCI_MSI_FLAGS_64BIT) {
+ p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE);
+ p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE);
+ if (flags & PCI_MSI_FLAGS_MASKBIT) {
+ p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE);
+ }
+ } else {
+ p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE);
+ if (flags & PCI_MSI_FLAGS_MASKBIT) {
+ p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE);
+ }
+ }
+ return 0;
+}
+
+ /* Determine MSI CAP field length; initialize msi_perm on 1st call per vdev */
+static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int len, ret;
+ u16 flags;
+
+ ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ len = 10; /* Minimum size */
+ if (flags & PCI_MSI_FLAGS_64BIT)
+ len += 4;
+ if (flags & PCI_MSI_FLAGS_MASKBIT)
+ len += 10;
+
+ if (vdev->msi_perm)
+ return len;
+
+ vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL);
+ if (!vdev->msi_perm)
+ return -ENOMEM;
+
+ ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
+ if (ret) {
+ kfree(vdev->msi_perm);
+ return ret;
+ }
+
+ return len;
+}
+
+/* Determine extended capability length for VC (2 & 9) and MFVC */
+static int vfio_vc_cap_len(struct vfio_pci_core_device *vdev, u16 pos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u32 tmp;
+ int ret, evcc, phases, vc_arb;
+ int len = PCI_CAP_VC_BASE_SIZEOF;
+
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (tmp & PCI_VC_CAP2_128_PHASE)
+ phases = 128;
+ else if (tmp & PCI_VC_CAP2_64_PHASE)
+ phases = 64;
+ else if (tmp & PCI_VC_CAP2_32_PHASE)
+ phases = 32;
+ else
+ phases = 0;
+
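+ /* Each VC arbitration table phase entry is 4 bits wide */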
+ vc_arb = phases * 4;
+
+ /*
+ * Port arbitration tables are root & switch only;
+ * function arbitration tables are function 0 only.
+ * In either case, we'll never let the user write them, so
+ * we don't care how big they are.
+ */
+ len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF;
+ if (vc_arb) {
+ len = round_up(len, 16);
+ len += vc_arb / 8;
+ }
+ return len;
+}
+
+static int vfio_cap_len(struct vfio_pci_core_device *vdev, u8 cap, u8 pos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u32 dword;
+ u16 word;
+ u8 byte;
+ int ret;
+
+ switch (cap) {
+ case PCI_CAP_ID_MSI:
+ return vfio_msi_cap_len(vdev, pos);
+ case PCI_CAP_ID_PCIX:
+ ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (PCI_X_CMD_VERSION(word)) {
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
+ &dword);
+ vdev->extended_caps = (dword != 0);
+ }
+ return PCI_CAP_PCIX_SIZEOF_V2;
+ } else
+ return PCI_CAP_PCIX_SIZEOF_V0;
+ case PCI_CAP_ID_VNDR:
+ /* length follows next field */
+ ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return byte;
+ case PCI_CAP_ID_EXP:
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+ vdev->extended_caps = (dword != 0);
+ }
+
+ /* length based on version and type */
+ if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+ return 0xc; /* "All Devices" only, no link */
+ return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
+ } else {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+ return 0x2c; /* No link */
+ return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
+ }
+ case PCI_CAP_ID_HT:
+ ret = pci_read_config_byte(pdev, pos + 3, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return (byte & HT_3BIT_CAP_MASK) ?
+ HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG;
+ case PCI_CAP_ID_SATA:
+ ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ byte &= PCI_SATA_REGS_MASK;
+ if (byte == PCI_SATA_REGS_INLINE)
+ return PCI_SATA_SIZEOF_LONG;
+ else
+ return PCI_SATA_SIZEOF_SHORT;
+ default:
+ pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n",
+ __func__, cap, pos);
+ }
+
+ return 0;
+}
+
+static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 byte;
+ u32 dword;
+ int ret;
+
+ switch (ecap) {
+ case PCI_EXT_CAP_ID_VNDR:
+ ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return dword >> PCI_VSEC_HDR_LEN_SHIFT;
+ case PCI_EXT_CAP_ID_VC:
+ case PCI_EXT_CAP_ID_VC9:
+ case PCI_EXT_CAP_ID_MFVC:
+ return vfio_vc_cap_len(vdev, epos);
+ case PCI_EXT_CAP_ID_ACS:
+ ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (byte & PCI_ACS_EC) {
+ int bits;
+
+ ret = pci_read_config_byte(pdev,
+ epos + PCI_ACS_EGRESS_BITS,
+ &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ bits = byte ? round_up(byte, 32) : 256;
+ return 8 + (bits / 8);
+ }
+ return 8;
+
+ case PCI_EXT_CAP_ID_REBAR:
+ ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ byte &= PCI_REBAR_CTRL_NBAR_MASK;
+ byte >>= PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ return 4 + (byte * 8);
+ case PCI_EXT_CAP_ID_DPA:
+ ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ byte &= PCI_DPA_CAP_SUBSTATE_MASK;
+ return PCI_DPA_BASE_SIZEOF + byte + 1;
+ case PCI_EXT_CAP_ID_TPH:
+ ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
+ int sts;
+
+ sts = dword & PCI_TPH_CAP_ST_MASK;
+ sts >>= PCI_TPH_CAP_ST_SHIFT;
+ return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
+ }
+ return PCI_TPH_BASE_SIZEOF;
+ default:
+ pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n",
+ __func__, ecap, epos);
+ }
+
+ return 0;
+}
+
+static void vfio_update_pm_vconfig_bytes(struct vfio_pci_core_device *vdev,
+ int offset)
+{
+ __le16 *pmc = (__le16 *)&vdev->vconfig[offset + PCI_PM_PMC];
+ __le16 *ctrl = (__le16 *)&vdev->vconfig[offset + PCI_PM_CTRL];
+
+ /* Clear vconfig PME_Support, PME_Status, and PME_En bits */
+ *pmc &= ~cpu_to_le16(PCI_PM_CAP_PME_MASK);
+ *ctrl &= ~cpu_to_le16(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS);
+}
+
+static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
+ int offset, int size)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret = 0;
+
+ /*
+ * We try to read physical config space in the largest chunks
+ * we can, assuming that all of the fields support dword access.
+ * pci_save_state() makes this same assumption and seems to do ok.
+ */
+ while (size) {
+ int filled;
+
+ if (size >= 4 && !(offset % 4)) {
+ __le32 *dwordp = (__le32 *)&vdev->vconfig[offset];
+ u32 dword;
+
+ ret = pci_read_config_dword(pdev, offset, &dword);
+ if (ret)
+ return ret;
+ *dwordp = cpu_to_le32(dword);
+ filled = 4;
+ } else if (size >= 2 && !(offset % 2)) {
+ __le16 *wordp = (__le16 *)&vdev->vconfig[offset];
+ u16 word;
+
+ ret = pci_read_config_word(pdev, offset, &word);
+ if (ret)
+ return ret;
+ *wordp = cpu_to_le16(word);
+ filled = 2;
+ } else {
+ u8 *byte = &vdev->vconfig[offset];
+ ret = pci_read_config_byte(pdev, offset, byte);
+ if (ret)
+ return ret;
+ filled = 1;
+ }
+
+ offset += filled;
+ size -= filled;
+ }
+
+ return ret;
+}
+
+static int vfio_cap_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 *map = vdev->pci_config_map;
+ u16 status;
+ u8 pos, *prev, cap;
+ int loops, ret, caps = 0;
+
+ /* Any capabilities? */
+ ret = pci_read_config_word(pdev, PCI_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0; /* Done */
+
+ ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
+ if (ret)
+ return ret;
+
+ /* Mark the previous position in case we want to skip a capability */
+ prev = &vdev->vconfig[PCI_CAPABILITY_LIST];
+
+ /* We can bound our loop, capabilities are dword aligned */
+ loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;
+ while (pos && loops--) {
+ u8 next;
+ int i, len = 0;
+
+ ret = pci_read_config_byte(pdev, pos, &cap);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_byte(pdev,
+ pos + PCI_CAP_LIST_NEXT, &next);
+ if (ret)
+ return ret;
+
+ /*
+ * ID 0 is a NULL capability, conflicting with our fake
+ * PCI_CAP_ID_BASIC. As it has no content, consider it
+ * hidden for now.
+ */
+ if (cap && cap <= PCI_CAP_ID_MAX) {
+ len = pci_cap_length[cap];
+ if (len == 0xFF) { /* Variable length */
+ len = vfio_cap_len(vdev, cap, pos);
+ if (len < 0)
+ return len;
+ }
+ }
+
+ if (!len) {
+ pci_info(pdev, "%s: hiding cap %#x@%#x\n", __func__,
+ cap, pos);
+ *prev = next;
+ pos = next;
+ continue;
+ }
+
+ /* Sanity check, do we overlap other capabilities? */
+ for (i = 0; i < len; i++) {
+ if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
+ continue;
+
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n",
+ __func__, pos + i, map[pos + i], cap);
+ }
+
+ BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
+
+ memset(map + pos, cap, len);
+ ret = vfio_fill_vconfig_bytes(vdev, pos, len);
+ if (ret)
+ return ret;
+
+ if (cap == PCI_CAP_ID_PM)
+ vfio_update_pm_vconfig_bytes(vdev, pos);
+
+ prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
+ pos = next;
+ caps++;
+ }
+
+ /* If we didn't fill any capabilities, clear the status flag */
+ if (!caps) {
+ __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS];
+ *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST);
+ }
+
+ return 0;
+}
+
+static int vfio_ecap_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 *map = vdev->pci_config_map;
+ u16 epos;
+ __le32 *prev = NULL;
+ int loops, ret, ecaps = 0;
+
+ if (!vdev->extended_caps)
+ return 0;
+
+ epos = PCI_CFG_SPACE_SIZE;
+
+ loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF;
+
+ while (loops-- && epos >= PCI_CFG_SPACE_SIZE) {
+ u32 header;
+ u16 ecap;
+ int i, len = 0;
+ bool hidden = false;
+
+ ret = pci_read_config_dword(pdev, epos, &header);
+ if (ret)
+ return ret;
+
+ ecap = PCI_EXT_CAP_ID(header);
+
+ if (ecap <= PCI_EXT_CAP_ID_MAX) {
+ len = pci_ext_cap_length[ecap];
+ if (len == 0xFF) {
+ len = vfio_ext_cap_len(vdev, ecap, epos);
+ if (len < 0)
+ return len;
+ }
+ }
+
+ if (!len) {
+ pci_info(pdev, "%s: hiding ecap %#x@%#x\n",
+ __func__, ecap, epos);
+
+ /* If not the first in the chain, we can skip over it */
+ if (prev) {
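+ /* The next capability pointer lives in bits 31:20 of the ecap header */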
+ u32 val = epos = PCI_EXT_CAP_NEXT(header);
+ *prev &= cpu_to_le32(~(0xffcU << 20));
+ *prev |= cpu_to_le32(val << 20);
+ continue;
+ }
+
+ /*
+ * Otherwise, fill in a placeholder, the direct
+ * readfn will virtualize this automatically
+ */
+ len = PCI_CAP_SIZEOF;
+ hidden = true;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
+ continue;
+
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n",
+ __func__, epos + i, map[epos + i], ecap);
+ }
+
+ /*
+ * Even though ecap is 2 bytes, we're currently a long way
+ * from exceeding 1-byte capability IDs. If we ever make it
+ * up to 0xFE we'll need to grow this to two bytes per config byte.
+ */
+ BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
+
+ memset(map + epos, ecap, len);
+ ret = vfio_fill_vconfig_bytes(vdev, epos, len);
+ if (ret)
+ return ret;
+
+ /*
+ * If we're just using this capability to anchor the list,
+ * hide the real ID. Only count real ecaps. XXX The PCI spec
+ * says to use cap id = 0, version = 0, next = 0 if ecaps are
+ * absent; hopefully users check all the way to the next pointer.
+ */
+ if (hidden)
+ *(__le32 *)&vdev->vconfig[epos] &=
+ cpu_to_le32((0xffcU << 20));
+ else
+ ecaps++;
+
+ prev = (__le32 *)&vdev->vconfig[epos];
+ epos = PCI_EXT_CAP_NEXT(header);
+ }
+
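+ /* No ecaps exposed - zero the first ecap header so the chain appears empty */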
+ if (!ecaps)
+ *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0;
+
+ return 0;
+}
+
+/*
+ * Nag about hardware bugs, hopefully to have vendors fix them, but at least
+ * to collect a list of dependencies for the VF INTx pin quirk below.
+ */
+static const struct pci_device_id known_bogus_vf_intx_pin[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) },
+ {}
+};
+
+/*
+ * For each device we allocate a pci_config_map that indicates the
+ * capability occupying each dword and thus the struct perm_bits we
+ * use for read and write. We also allocate a virtualized config
+ * space which tracks reads and writes to bits that we emulate for
+ * the user. Initial values are filled from the device.
+ *
+ * Using shared struct perm_bits between all vfio-pci devices saves
+ * us from allocating cfg_size buffers for virt and write for every
+ * device. We could remove vconfig and allocate individual buffers
+ * for each area requiring emulated bits, but the array of pointers
+ * would be comparable in size (at least for standard config space).
+ */
+int vfio_config_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 *map, *vconfig;
+ int ret;
+
+ /*
+ * Config space, caps and ecaps are all dword aligned, so we could
+ * use one byte per dword to record the type. However, there are
+ * no requirements on the length of a capability, so the gap between
+ * capabilities needs byte granularity.
+ */
+ map = kmalloc(pdev->cfg_size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL);
+ if (!vconfig) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ vdev->pci_config_map = map;
+ vdev->vconfig = vconfig;
+
+ memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
+ memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
+ pdev->cfg_size - PCI_STD_HEADER_SIZEOF);
+
+ ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
+ if (ret)
+ goto out;
+
+ vdev->bardirty = true;
+
+ /*
+ * XXX can we just pci_load_saved_state/pci_restore_state?
+ * may need to rebuild vconfig after that
+ */
+
+ /* For restore after reset */
+ vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]);
+ vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]);
+ vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]);
+ vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]);
+ vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]);
+ vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]);
+ vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]);
+
+ if (pdev->is_virtfn) {
+ *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
+ *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
+
+ /*
+ * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register
+ * does not apply to VFs and VFs must implement this register
+ * as read-only with value zero. Userspace is not readily able
+ * to identify whether a device is a VF and thus that the pin
+ * definition on the device is bogus should it violate this
+ * requirement. We already virtualize the pin register for
+ * other purposes, so we simply need to replace the bogus value
+ * and consider VFs when we determine INTx IRQ count.
+ */
+ if (vconfig[PCI_INTERRUPT_PIN] &&
+ !pci_match_id(known_bogus_vf_intx_pin, pdev))
+ pci_warn(pdev,
+ "Hardware bug: VF reports bogus INTx pin %d\n",
+ vconfig[PCI_INTERRUPT_PIN]);
+
+ vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+ }
+ if (pdev->no_command_memory) {
+ /*
+ * VFs and devices that set pdev->no_command_memory do not
+ * implement the memory enable bit of the COMMAND register,
+ * so we won't have it set in our initial copy of
+ * config space after pci_enable_device(). For consistency
+ * with PFs, set the virtual enable bit here.
+ */
+ *(__le16 *)&vconfig[PCI_COMMAND] |=
+ cpu_to_le16(PCI_COMMAND_MEMORY);
+ }
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
+ vconfig[PCI_INTERRUPT_PIN] = 0;
+
+ ret = vfio_cap_init(vdev);
+ if (ret)
+ goto out;
+
+ ret = vfio_ecap_init(vdev);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ kfree(map);
+ vdev->pci_config_map = NULL;
+ kfree(vconfig);
+ vdev->vconfig = NULL;
+ return pcibios_err_to_errno(ret);
+}
+
+void vfio_config_free(struct vfio_pci_core_device *vdev)
+{
+ kfree(vdev->vconfig);
+ vdev->vconfig = NULL;
+ kfree(vdev->pci_config_map);
+ vdev->pci_config_map = NULL;
+ if (vdev->msi_perm) {
+ free_perm_bits(vdev->msi_perm);
+ kfree(vdev->msi_perm);
+ vdev->msi_perm = NULL;
+ }
+}
+
+/*
+ * Find the number of bytes remaining in the dword that belong to the same
+ * capability as the given position. Stop at either the end of the capability
+ * or the dword boundary.
+ */
+static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_core_device *vdev,
+ loff_t pos)
+{
+ u8 cap = vdev->pci_config_map[pos];
+ size_t i;
+
+ for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++)
+ /* nop */;
+
+ return i;
+}
+
+static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct perm_bits *perm;
+ __le32 val = 0;
+ int cap_start = 0, offset;
+ u8 cap_id;
+ ssize_t ret;
+
+ if (*ppos < 0 || *ppos >= pdev->cfg_size ||
+ *ppos + count > pdev->cfg_size)
+ return -EFAULT;
+
+ /*
+ * Chop accesses into aligned chunks containing no more than a
+ * single capability. Caller increments to the next chunk.
+ */
+ count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos));
+ if (count >= 4 && !(*ppos % 4))
+ count = 4;
+ else if (count >= 2 && !(*ppos % 2))
+ count = 2;
+ else
+ count = 1;
+
+ ret = count;
+
+ cap_id = vdev->pci_config_map[*ppos];
+
+ if (cap_id == PCI_CAP_ID_INVALID) {
+ perm = &unassigned_perms;
+ cap_start = *ppos;
+ } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) {
+ perm = &virt_perms;
+ cap_start = *ppos;
+ } else {
+ if (*ppos >= PCI_CFG_SPACE_SIZE) {
+ WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
+
+ perm = &ecap_perms[cap_id];
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+ } else {
+ WARN_ON(cap_id > PCI_CAP_ID_MAX);
+
+ perm = &cap_perms[cap_id];
+
+ if (cap_id == PCI_CAP_ID_MSI)
+ perm = vdev->msi_perm;
+
+ if (cap_id > PCI_CAP_ID_BASIC)
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+ }
+ }
+
+ WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
+ WARN_ON(cap_start > *ppos);
+
+ offset = *ppos - cap_start;
+
+ if (iswrite) {
+ if (!perm->writefn)
+ return ret;
+
+ if (copy_from_user(&val, buf, count))
+ return -EFAULT;
+
+ ret = perm->writefn(vdev, *ppos, count, perm, offset, val);
+ } else {
+ if (perm->readfn) {
+ ret = perm->readfn(vdev, *ppos, count,
+ perm, offset, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (copy_to_user(buf, &val, count))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ size_t done = 0;
+ int ret = 0;
+ loff_t pos = *ppos;
+
+ pos &= VFIO_PCI_OFFSET_MASK;
+
+ while (count) {
+ ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite);
+ if (ret < 0)
+ return ret;
+
+ count -= ret;
+ done += ret;
+ buf += ret;
+ pos += ret;
+ }
+
+ *ppos += done;
+
+ return done;
+}
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
new file mode 100644
index 000000000..e030c2120
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -0,0 +1,2565 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/aperture.h>
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vgaarb.h>
+#include <linux/nospec.h>
+#include <linux/sched/mm.h>
+
+#include "vfio_pci_priv.h"
+
+#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+#define DRIVER_DESC "core driver for VFIO based PCI devices"
+
+static bool nointxmask;
+static bool disable_vga;
+static bool disable_idle_d3;
+
+ /* List of PFs that vfio_pci_core_sriov_configure() has been called on */
+static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
+static LIST_HEAD(vfio_pci_sriov_pfs);
+
+struct vfio_pci_dummy_resource {
+ struct resource resource;
+ int index;
+ struct list_head res_next;
+};
+
+struct vfio_pci_vf_token {
+ struct mutex lock;
+ uuid_t uuid;
+ int users;
+};
+
+struct vfio_pci_mmap_vma {
+ struct vm_area_struct *vma;
+ struct list_head vma_next;
+};
+
+static inline bool vfio_vga_disabled(void)
+{
+#ifdef CONFIG_VFIO_PCI_VGA
+ return disable_vga;
+#else
+ return true;
+#endif
+}
+
+/*
+ * Our VGA arbiter participation is limited since we don't know anything
+ * about the device itself. However, if the device is the only VGA device
+ * downstream of a bridge and VFIO VGA support is disabled, then we can
+ * safely return legacy VGA IO and memory as not decoded since the user
+ * has no way to get to it and routing can be disabled externally at the
+ * bridge.
+ */
+static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
+{
+ struct pci_dev *tmp = NULL;
+ unsigned char max_busnr;
+ unsigned int decodes;
+
+ if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+
+ max_busnr = pci_bus_max_busnr(pdev->bus);
+ decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+
+ while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
+ if (tmp == pdev ||
+ pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
+ pci_is_root_bus(tmp->bus))
+ continue;
+
+ if (tmp->bus->number >= pdev->bus->number &&
+ tmp->bus->number <= max_busnr) {
+ pci_dev_put(tmp);
+ decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ break;
+ }
+ }
+
+ return decodes;
+}
+
+static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
+{
+ struct resource *res;
+ int i;
+ struct vfio_pci_dummy_resource *dummy_res;
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ res = &vdev->pdev->resource[bar];
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
+ goto no_mmap;
+
+ if (!(res->flags & IORESOURCE_MEM))
+ goto no_mmap;
+
+ /*
+ * The PCI core shouldn't set up a resource with a
+ * type but zero size. But there may be bugs that
+ * cause us to do that.
+ */
+ if (!resource_size(res))
+ goto no_mmap;
+
+ if (resource_size(res) >= PAGE_SIZE) {
+ vdev->bar_mmap_supported[bar] = true;
+ continue;
+ }
+
+ if (!(res->start & ~PAGE_MASK)) {
+ /*
+ * Add a dummy resource to reserve the remainder
+ * of the exclusive page in case a hot-added
+ * device's BAR is assigned into it.
+ */
+ dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
+ if (dummy_res == NULL)
+ goto no_mmap;
+
+ dummy_res->resource.name = "vfio sub-page reserved";
+ dummy_res->resource.start = res->end + 1;
+ dummy_res->resource.end = res->start + PAGE_SIZE - 1;
+ dummy_res->resource.flags = res->flags;
+ if (request_resource(res->parent,
+ &dummy_res->resource)) {
+ kfree(dummy_res);
+ goto no_mmap;
+ }
+ dummy_res->index = bar;
+ list_add(&dummy_res->res_next,
+ &vdev->dummy_resources_list);
+ vdev->bar_mmap_supported[bar] = true;
+ continue;
+ }
+ /*
+ * Here we don't handle the case where the BAR is not page
+ * aligned, because we can't expect the BAR to be assigned to
+ * the same location within a page in the guest when we pass
+ * the BAR through. It is also hard to access such a BAR from
+ * userspace, because we have no way to learn the BAR's offset
+ * within a page.
+ */
+no_mmap:
+ vdev->bar_mmap_supported[bar] = false;
+ }
+}
+
+struct vfio_pci_group_info;
+static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
+static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ struct vfio_pci_group_info *groups);
+
+/*
+ * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
+ * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
+ * If a device implements the former but not the latter, we would typically
+ * expect broken_intx_masking to be set and require an exclusive interrupt.
+ * However, since we do have control of the device's ability to assert INTx,
+ * we can instead pretend that the device does not implement INTx, virtualizing
+ * the pin register to report zero and maintaining DisINTx set on the host.
+ */
+static bool vfio_pci_nointx(struct pci_dev *pdev)
+{
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ switch (pdev->device) {
+ /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
+ case 0x1572:
+ case 0x1574:
+ case 0x1580 ... 0x1581:
+ case 0x1583 ... 0x158b:
+ case 0x37d0 ... 0x37d2:
+ /* X550 */
+ case 0x1563:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static void vfio_pci_probe_power_state(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 pmcsr;
+
+ if (!pdev->pm_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
+}
+
+/*
+ * pci_set_power_state() wrapper handling devices which perform a soft reset on
+ * D3->D0 transition. Save state prior to D0/1/2->D3, stash it on the vdev,
+ * restore when returned to D0. Saved separately from pci_saved_state for use
+ * by PM capability emulation and separately from pci_dev internal saved state
+ * to avoid it being overwritten and consumed around other resets.
+ */
+int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t state)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ bool needs_restore = false, needs_save = false;
+ int ret;
+
+ /* Prevent changing power state for PFs with VFs enabled */
+ if (pci_num_vf(pdev) && state > PCI_D0)
+ return -EBUSY;
+
+ if (vdev->needs_pm_restore) {
+ if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
+ pci_save_state(pdev);
+ needs_save = true;
+ }
+
+ if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
+ needs_restore = true;
+ }
+
+ ret = pci_set_power_state(pdev, state);
+
+ if (!ret) {
+ /* D3 might be unsupported via quirk, skip unless in D3 */
+ if (needs_save && pdev->current_state >= PCI_D3hot) {
+ /*
+ * The current PCI state will be saved locally in
+ * 'pm_save' during the D3hot transition. When the
+ * device state is changed to D0 again with the current
+ * function, then pci_load_and_free_saved_state() will
+ * restore the state and will free the memory pointed to
+ * by 'pm_save'. There are a few cases where the PCI power
+ * state can be changed to D0 without the involvement
+ * of the driver. For these cases, free the earlier
+ * allocated memory first before overwriting 'pm_save'
+ * to prevent the memory leak.
+ */
+ kfree(vdev->pm_save);
+ vdev->pm_save = pci_store_saved_state(pdev);
+ } else if (needs_restore) {
+ pci_load_and_free_saved_state(pdev, &vdev->pm_save);
+ pci_restore_state(pdev);
+ }
+ }
+
+ return ret;
+}
+
+static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev,
+ struct eventfd_ctx *efdctx)
+{
+ /*
+ * The vdev power related flags are protected with 'memory_lock'
+ * semaphore.
+ */
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ if (vdev->pm_runtime_engaged) {
+ up_write(&vdev->memory_lock);
+ return -EINVAL;
+ }
+
+ vdev->pm_runtime_engaged = true;
+ vdev->pm_wake_eventfd_ctx = efdctx;
+ pm_runtime_put_noidle(&vdev->pdev->dev);
+ up_write(&vdev->memory_lock);
+
+ return 0;
+}
+
+static int vfio_pci_core_pm_entry(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ /*
+ * Inside vfio_pci_runtime_pm_entry(), only the runtime PM usage count
+ * will be decremented. The pm_runtime_put() will be invoked again
+ * while returning from the ioctl and then the device can go into
+ * runtime suspended state.
+ */
+ return vfio_pci_runtime_pm_entry(vdev, NULL);
+}
+
+static int vfio_pci_core_pm_entry_with_wakeup(
+ struct vfio_device *device, u32 flags,
+ struct vfio_device_low_power_entry_with_wakeup __user *arg,
+ size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ struct vfio_device_low_power_entry_with_wakeup entry;
+ struct eventfd_ctx *efdctx;
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
+ sizeof(entry));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&entry, arg, sizeof(entry)))
+ return -EFAULT;
+
+ if (entry.wakeup_eventfd < 0)
+ return -EINVAL;
+
+ efdctx = eventfd_ctx_fdget(entry.wakeup_eventfd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ ret = vfio_pci_runtime_pm_entry(vdev, efdctx);
+ if (ret)
+ eventfd_ctx_put(efdctx);
+
+ return ret;
+}
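+
+/*
+ * Illustrative userspace sketch (not part of this patch): low power entry
+ * with a wakeup eventfd is requested through VFIO_DEVICE_FEATURE, with the
+ * feature payload following the vfio_device_feature header. 'device_fd' and
+ * the eventfd 'efd' are assumed to exist already:
+ *
+ *	struct {
+ *		struct vfio_device_feature hdr;
+ *		struct vfio_device_low_power_entry_with_wakeup entry;
+ *	} req = {
+ *		.hdr.argsz = sizeof(req),
+ *		.hdr.flags = VFIO_DEVICE_FEATURE_SET |
+ *			     VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP,
+ *		.entry.wakeup_eventfd = efd,
+ *	};
+ *
+ *	ioctl(device_fd, VFIO_DEVICE_FEATURE, &req);
+ *
+ * A later VFIO_DEVICE_FEATURE_LOW_POWER_EXIT SET (no payload) undoes this;
+ * 'efd' is signaled if the device is resumed by other means first.
+ */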
+
+static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
+{
+ if (vdev->pm_runtime_engaged) {
+ vdev->pm_runtime_engaged = false;
+ pm_runtime_get_noresume(&vdev->pdev->dev);
+
+ if (vdev->pm_wake_eventfd_ctx) {
+ eventfd_ctx_put(vdev->pm_wake_eventfd_ctx);
+ vdev->pm_wake_eventfd_ctx = NULL;
+ }
+ }
+}
+
+static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
+{
+ /*
+ * The vdev power related flags are protected with 'memory_lock'
+ * semaphore.
+ */
+ down_write(&vdev->memory_lock);
+ __vfio_pci_runtime_pm_exit(vdev);
+ up_write(&vdev->memory_lock);
+}
+
+static int vfio_pci_core_pm_exit(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ /*
+ * The device is always in the active state here due to pm wrappers
+ * around ioctls. If the device had entered a low power state and
+ * pm_wake_eventfd_ctx is valid, vfio_pci_core_runtime_resume() has
+ * already signaled the eventfd and exited low power mode itself.
+ * pm_runtime_engaged protects the redundant call here.
+ */
+ vfio_pci_runtime_pm_exit(vdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int vfio_pci_core_runtime_suspend(struct device *dev)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
+
+ down_write(&vdev->memory_lock);
+ /*
+ * The user can move the device into D3hot state before invoking
+ * power management IOCTL. Move the device into D0 state here and then
+ * the pci-driver core runtime PM suspend function will move the device
+ * into the low power state. Also, for the devices which have
+ * NoSoftRst-, it will help in restoring the original state
+ * (saved locally in 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ up_write(&vdev->memory_lock);
+
+ /*
+ * If INTx is enabled, then mask INTx before going into the runtime
+ * suspended state and unmask the same in the runtime resume.
+ * If INTx has already been masked by the user, then
+ * vfio_pci_intx_mask() will return false and in that case, INTx
+ * should not be unmasked in the runtime resume.
+ */
+ vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
+ vfio_pci_intx_mask(vdev));
+
+ return 0;
+}
+
+static int vfio_pci_core_runtime_resume(struct device *dev)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
+
+ /*
+ * A resume while pm_wake_eventfd_ctx is set signals the eventfd and exits
+ * low power mode.
+ */
+ down_write(&vdev->memory_lock);
+ if (vdev->pm_wake_eventfd_ctx) {
+ eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
+ __vfio_pci_runtime_pm_exit(vdev);
+ }
+ up_write(&vdev->memory_lock);
+
+ if (vdev->pm_intx_masked)
+ vfio_pci_intx_unmask(vdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/*
+ * The pci-driver core runtime PM routines always save the device state
+ * before going into the suspended state. If the device is entering a low
+ * power state only via the runtime PM ops, then no explicit handling is needed
+ * for the devices which have NoSoftRst-.
+ */
+static const struct dev_pm_ops vfio_pci_core_pm_ops = {
+ SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend,
+ vfio_pci_core_runtime_resume,
+ NULL)
+};
+
+int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+ u16 cmd;
+ u8 msix_pos;
+
+ if (!disable_idle_d3) {
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Don't allow our initial saved state to include busmaster */
+ pci_clear_master(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_power;
+
+ /* If reset fails because of the device lock, fail this path entirely */
+ ret = pci_try_reset_function(pdev);
+ if (ret == -EAGAIN)
+ goto out_disable_device;
+
+ vdev->reset_works = !ret;
+ pci_save_state(pdev);
+ vdev->pci_saved_state = pci_store_saved_state(pdev);
+ if (!vdev->pci_saved_state)
+ pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
+
+ if (likely(!nointxmask)) {
+ if (vfio_pci_nointx(pdev)) {
+ pci_info(pdev, "Masking broken INTx support\n");
+ vdev->nointx = true;
+ pci_intx(pdev, 0);
+ } else
+ vdev->pci_2_3 = pci_intx_mask_supported(pdev);
+ }
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
+ cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ ret = vfio_pci_zdev_open_device(vdev);
+ if (ret)
+ goto out_free_state;
+
+ ret = vfio_config_init(vdev);
+ if (ret)
+ goto out_free_zdev;
+
+ msix_pos = pdev->msix_cap;
+ if (msix_pos) {
+ u16 flags;
+ u32 table;
+
+ pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
+ pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
+
+ vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
+ vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
+ vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
+ } else
+ vdev->msix_bar = 0xFF;
+
+ if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
+ vdev->has_vga = true;
+
+ return 0;
+
+out_free_zdev:
+ vfio_pci_zdev_close_device(vdev);
+out_free_state:
+ kfree(vdev->pci_saved_state);
+ vdev->pci_saved_state = NULL;
+out_disable_device:
+ pci_disable_device(pdev);
+out_power:
+ if (!disable_idle_d3)
+ pm_runtime_put(&pdev->dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_enable);
+
+void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_dummy_resource *dummy_res, *tmp;
+ struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
+ int i, bar;
+
+ /* For needs_reset */
+ lockdep_assert_held(&vdev->vdev.dev_set->lock);
+
+ /*
+ * This function can be invoked while the power state is non-D0.
+ * This non-D0 power state can be with or without runtime PM.
+ * vfio_pci_runtime_pm_exit() will internally increment the usage
+ * count corresponding to pm_runtime_put() called during low power
+ * feature entry and then pm_runtime_resume() will wake up the device,
+ * if the device has already gone into the suspended state. Otherwise,
+ * the vfio_pci_set_power_state() will change the device power state
+ * to D0.
+ */
+ vfio_pci_runtime_pm_exit(vdev);
+ pm_runtime_resume(&pdev->dev);
+
+ /*
+ * This function calls __pci_reset_function_locked() which internally
+ * can use pci_pm_reset() for the function reset. pci_pm_reset() will
+ * fail if the power state is non-D0. Also, for the devices which
+ * have NoSoftRst-, the reset function can cause the PCI config space
+ * reset without restoring the original state (saved locally in
+ * 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ /* Stop the device from further DMA */
+ pci_clear_master(pdev);
+
+ vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
+ VFIO_IRQ_SET_ACTION_TRIGGER,
+ vdev->irq_type, 0, 0, NULL);
+
+ /* Device closed, don't need mutex here */
+ list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
+ &vdev->ioeventfds_list, next) {
+ vfio_virqfd_disable(&ioeventfd->virqfd);
+ list_del(&ioeventfd->next);
+ kfree(ioeventfd);
+ }
+ vdev->ioeventfds_nr = 0;
+
+ vdev->virq_disabled = false;
+
+ for (i = 0; i < vdev->num_regions; i++)
+ vdev->region[i].ops->release(vdev, &vdev->region[i]);
+
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL; /* don't krealloc a freed pointer */
+
+ vfio_config_free(vdev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ bar = i + PCI_STD_RESOURCES;
+ if (!vdev->barmap[bar])
+ continue;
+ pci_iounmap(pdev, vdev->barmap[bar]);
+ pci_release_selected_regions(pdev, 1 << bar);
+ vdev->barmap[bar] = NULL;
+ }
+
+ list_for_each_entry_safe(dummy_res, tmp,
+ &vdev->dummy_resources_list, res_next) {
+ list_del(&dummy_res->res_next);
+ release_resource(&dummy_res->resource);
+ kfree(dummy_res);
+ }
+
+ vdev->needs_reset = true;
+
+ vfio_pci_zdev_close_device(vdev);
+
+ /*
+ * If we have saved state, restore it. If we can reset the device,
+ * even better. Resetting with current state seems better than
+ * nothing, but saving and restoring current state without reset
+ * is just busy work.
+ */
+ if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
+ pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
+
+ if (!vdev->reset_works)
+ goto out;
+
+ pci_save_state(pdev);
+ }
+
+ /*
+ * Disable INTx and MSI, presumably to avoid spurious interrupts
+ * during reset. Stolen from pci_reset_function()
+ */
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ /*
+ * Try to get the locks ourselves to prevent a deadlock. The
+ * success of this is dependent on being able to lock the device,
+ * which is not always possible.
+ * We can not use the "try" reset interface here, which will
+ * overwrite the previously restored configuration information.
+ */
+ if (vdev->reset_works && pci_dev_trylock(pdev)) {
+ if (!__pci_reset_function_locked(pdev))
+ vdev->needs_reset = false;
+ pci_dev_unlock(pdev);
+ }
+
+ pci_restore_state(pdev);
+out:
+ pci_disable_device(pdev);
+
+ vfio_pci_dev_set_try_reset(vdev->vdev.dev_set);
+
+ /* Put the pm-runtime usage counter acquired during enable */
+ if (!disable_idle_d3)
+ pm_runtime_put(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
+
+void vfio_pci_core_close_device(struct vfio_device *core_vdev)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
+ vdev->sriov_pf_core_dev->vf_token->users--;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
+ vfio_spapr_pci_eeh_release(vdev->pdev);
+ vfio_pci_core_disable(vdev);
+
+ mutex_lock(&vdev->igate);
+ if (vdev->err_trigger) {
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = NULL;
+ }
+ if (vdev->req_trigger) {
+ eventfd_ctx_put(vdev->req_trigger);
+ vdev->req_trigger = NULL;
+ }
+ mutex_unlock(&vdev->igate);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);
+
+void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
+{
+ vfio_pci_probe_mmaps(vdev);
+ vfio_spapr_pci_eeh_open(vdev->pdev);
+
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ vdev->sriov_pf_core_dev->vf_token->users++;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
+
+static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
+{
+ if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
+ u8 pin;
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
+ vdev->nointx || vdev->pdev->is_virtfn)
+ return 0;
+
+ pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
+
+ return pin ? 1 : 0;
+ } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
+ u8 pos;
+ u16 flags;
+
+ pos = vdev->pdev->msi_cap;
+ if (pos) {
+ pci_read_config_word(vdev->pdev,
+ pos + PCI_MSI_FLAGS, &flags);
+ return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
+ }
+ } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
+ u8 pos;
+ u16 flags;
+
+ pos = vdev->pdev->msix_cap;
+ if (pos) {
+ pci_read_config_word(vdev->pdev,
+ pos + PCI_MSIX_FLAGS, &flags);
+
+ return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
+ }
+ } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
+ if (pci_is_pcie(vdev->pdev))
+ return 1;
+ } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
+{
+ (*(int *)data)++;
+ return 0;
+}
+
+struct vfio_pci_fill_info {
+ int max;
+ int cur;
+ struct vfio_pci_dependent_device *devices;
+};
+
+static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_fill_info *fill = data;
+ struct iommu_group *iommu_group;
+
+ if (fill->cur == fill->max)
+ return -EAGAIN; /* Something changed, try again */
+
+ iommu_group = iommu_group_get(&pdev->dev);
+ if (!iommu_group)
+ return -EPERM; /* Cannot reset non-isolated devices */
+
+ fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
+ fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
+ fill->devices[fill->cur].bus = pdev->bus->number;
+ fill->devices[fill->cur].devfn = pdev->devfn;
+ fill->cur++;
+ iommu_group_put(iommu_group);
+ return 0;
+}
+
+struct vfio_pci_group_info {
+ int count;
+ struct file **files;
+};
+
+static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
+{
+ for (; pdev; pdev = pdev->bus->self)
+ if (pdev->bus == slot->bus)
+ return (pdev->slot == slot);
+ return false;
+}
+
+struct vfio_pci_walk_info {
+ int (*fn)(struct pci_dev *pdev, void *data);
+ void *data;
+ struct pci_dev *pdev;
+ bool slot;
+ int ret;
+};
+
+static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_walk_info *walk = data;
+
+ if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
+ walk->ret = walk->fn(pdev, walk->data);
+
+ return walk->ret;
+}
+
+static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *,
+ void *data), void *data,
+ bool slot)
+{
+ struct vfio_pci_walk_info walk = {
+ .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
+ };
+
+ pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
+
+ return walk.ret;
+}
+
+static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_info_cap_header header = {
+ .id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
+ .version = 1
+ };
+
+ return vfio_info_add_capability(caps, &header, sizeof(header));
+}
+
+int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_pci_region *region;
+
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+
+ vdev->num_regions++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region);
+
+static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
+ struct vfio_device_info __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
+ struct vfio_device_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ unsigned long capsz;
+ int ret;
+
+ /* For backward compatibility, cannot require this */
+ capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.argsz >= capsz) {
+ minsz = capsz;
+ info.cap_offset = 0;
+ }
+
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
+
+ if (vdev->reset_works)
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+
+ info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+ ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
+ if (ret && ret != -ENODEV) {
+ pci_warn(vdev->pdev,
+ "Failed to setup zPCI info capabilities\n");
+ return ret;
+ }
+
+ if (caps.size) {
+ info.flags |= VFIO_DEVICE_FLAGS_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user(arg + 1, caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(*arg);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
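+
+/*
+ * Illustrative userspace sketch (not part of this patch) of the matching
+ * call for the handler above, assuming 'device_fd' from
+ * VFIO_GROUP_GET_DEVICE_FD:
+ *
+ *	struct vfio_device_info info = { .argsz = sizeof(info) };
+ *
+ *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
+ *
+ * On return, info.flags carries VFIO_DEVICE_FLAGS_PCI (plus _RESET when
+ * reset_works), while info.num_regions and info.num_irqs bound the index
+ * space accepted by the GET_REGION_INFO and GET_IRQ_INFO ioctls below.
+ */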
+
+static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
+ struct vfio_region_info __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_region_info, offset);
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
+
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pdev->cfg_size;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ info.flags = 0;
+ break;
+ }
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ if (vdev->bar_mmap_supported[info.index]) {
+ info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+ if (info.index == vdev->msix_bar) {
+ ret = msix_mmappable_cap(vdev, &caps);
+ if (ret)
+ return ret;
+ }
+ }
+
+ break;
+ case VFIO_PCI_ROM_REGION_INDEX: {
+ void __iomem *io;
+ size_t size;
+ u16 cmd;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = 0;
+
+ /* Report the BAR size, not the ROM size */
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ /* Shadow ROMs appear as PCI option ROMs */
+ if (pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW)
+ info.size = 0x20000;
+ else
+ break;
+ }
+
+ /*
+ * Is it really there? Enable memory decode for implicit access
+ * in pci_map_rom().
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
+ info.size = 0;
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ break;
+ }
+ case VFIO_PCI_VGA_REGION_INDEX:
+ if (!vdev->has_vga)
+ return -EINVAL;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0xc0000;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+
+ break;
+ default: {
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1
+ };
+
+ if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+ info.index = array_index_nospec(
+ info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
+
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+
+ if (vdev->region[i].ops->add_capability) {
+ ret = vdev->region[i].ops->add_capability(
+ vdev, &vdev->region[i], &caps);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user(arg + 1, caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(*arg);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
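+
+/*
+ * Illustrative userspace sketch (not part of this patch): probing BAR0 with
+ * the handler above, assuming 'device_fd' is already open:
+ *
+ *	struct vfio_region_info info = {
+ *		.argsz = sizeof(info),
+ *		.index = VFIO_PCI_BAR0_REGION_INDEX,
+ *	};
+ *
+ *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
+ *
+ * info.offset is then the offset to use with pread()/pwrite()/mmap() on the
+ * device fd, info.size is the BAR size, and VFIO_REGION_INFO_FLAG_MMAP in
+ * info.flags indicates the BAR may be mapped (see vfio_pci_core_mmap()).
+ * When capabilities are attached and argsz is too small, only the required
+ * argsz is reported and the caller retries with a larger buffer.
+ */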
+
+static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
+ struct vfio_irq_info __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_irq_info, count);
+ struct vfio_irq_info info;
+
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ if (pci_is_pcie(vdev->pdev))
+ break;
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+
+ info.count = vfio_pci_get_irq_count(vdev, info.index);
+
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |=
+ (VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
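+
+/*
+ * Illustrative userspace sketch (not part of this patch): discovering how
+ * many MSI-X vectors may be configured, assuming 'device_fd' is open:
+ *
+ *	struct vfio_irq_info info = {
+ *		.argsz = sizeof(info),
+ *		.index = VFIO_PCI_MSIX_IRQ_INDEX,
+ *	};
+ *
+ *	ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info);
+ *
+ * info.count mirrors vfio_pci_get_irq_count() above, and info.flags tells
+ * the caller whether the index is maskable/automasked (INTx) or cannot be
+ * resized piecemeal (NORESIZE for MSI/MSI-X).
+ */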
+
+static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
+ struct vfio_irq_set __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_irq_set, count);
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int max, ret = 0;
+ size_t data_size = 0;
+
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
+
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
+ &data_size);
+ if (ret)
+ return ret;
+
+ if (data_size) {
+ data = memdup_user(&arg->data, data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ mutex_lock(&vdev->igate);
+
+ ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
+ hdr.count, data);
+
+ mutex_unlock(&vdev->igate);
+ kfree(data);
+
+ return ret;
+}
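+
+/*
+ * Illustrative userspace sketch (not part of this patch): wiring two MSI-X
+ * vectors to eventfds in one VFIO_DEVICE_SET_IRQS call; 'device_fd', 'efd0'
+ * and 'efd1' are assumed to exist:
+ *
+ *	struct vfio_irq_set *set;
+ *	__s32 *fds;
+ *
+ *	set = calloc(1, sizeof(*set) + 2 * sizeof(__s32));
+ *	set->argsz = sizeof(*set) + 2 * sizeof(__s32);
+ *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ *	set->start = 0;
+ *	set->count = 2;
+ *	fds = (__s32 *)set->data;
+ *	fds[0] = efd0;
+ *	fds[1] = efd1;
+ *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
+ *
+ * The handler above validates the header, copies the variable-length data
+ * with memdup_user() and hands it to vfio_pci_set_irqs_ioctl() under igate.
+ */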
+
+static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
+ void __user *arg)
+{
+ int ret;
+
+ if (!vdev->reset_works)
+ return -EINVAL;
+
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+
+ /*
+ * This function can be invoked while the power state is non-D0. If
+ * pci_try_reset_function() has been called while the power state is
+ * non-D0, then pci_try_reset_function() will internally set the power
+ * state to D0 without vfio driver involvement. For the devices which
+ * have NoSoftRst-, the reset function can cause the PCI config space
+ * reset without restoring the original state (saved locally in
+ * 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+
+ return ret;
+}
+
+static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset_info __user *arg)
+{
+ unsigned long minsz =
+ offsetofend(struct vfio_pci_hot_reset_info, count);
+ struct vfio_pci_hot_reset_info hdr;
+ struct vfio_pci_fill_info fill = { 0 };
+ struct vfio_pci_dependent_device *devices = NULL;
+ bool slot = false;
+ int ret = 0;
+
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz)
+ return -EINVAL;
+
+ hdr.flags = 0;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /* How many devices are affected? */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &fill.max, slot);
+ if (ret)
+ return ret;
+
+ WARN_ON(!fill.max); /* Should always be at least one */
+
+ /*
+ * If there's enough space, fill it now, otherwise return -ENOSPC and
+ * the number of devices affected.
+ */
+ if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+ ret = -ENOSPC;
+ hdr.count = fill.max;
+ goto reset_info_exit;
+ }
+
+ devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
+
+ fill.devices = devices;
+
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
+ &fill, slot);
+
+ /*
+ * If a device was removed between counting and filling, we may come up
+ * short of fill.max. If a device was added, we'll have a return of
+ * -EAGAIN above.
+ */
+ if (!ret)
+ hdr.count = fill.cur;
+
+reset_info_exit:
+ if (copy_to_user(arg, &hdr, minsz))
+ ret = -EFAULT;
+
+ if (!ret) {
+ if (copy_to_user(&arg->devices, devices,
+ hdr.count * sizeof(*devices)))
+ ret = -EFAULT;
+ }
+
+ kfree(devices);
+ return ret;
+}
+
+static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
+ struct vfio_pci_hot_reset hdr;
+ int32_t *group_fds;
+ struct file **files;
+ struct vfio_pci_group_info info;
+ bool slot = false;
+ int file_idx, count = 0, ret = 0;
+
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /*
+ * We can't let userspace give us an arbitrarily large buffer to copy,
+ * so verify how many we think there could be. Note groups can have
+ * multiple devices so one group per device is the max.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &count, slot);
+ if (ret)
+ return ret;
+
+ /* Somewhere between 1 and count is OK */
+ if (!hdr.count || hdr.count > count)
+ return -EINVAL;
+
+ group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+ files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+ if (!group_fds || !files) {
+ kfree(group_fds);
+ kfree(files);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(group_fds, arg->group_fds,
+ hdr.count * sizeof(*group_fds))) {
+ kfree(group_fds);
+ kfree(files);
+ return -EFAULT;
+ }
+
+ /*
+ * For each group_fd, take a reference to the group file and verify that
+ * it is a vfio group file. Holding these references ensures the groups
+ * are held across the reset.
+ */
+ for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+ struct file *file = fget(group_fds[file_idx]);
+
+ if (!file) {
+ ret = -EBADF;
+ break;
+ }
+
+ /* Ensure the FD is a vfio group FD. */
+ if (!vfio_file_is_group(file)) {
+ fput(file);
+ ret = -EINVAL;
+ break;
+ }
+
+ files[file_idx] = file;
+ }
+
+ kfree(group_fds);
+
+ /* release reference to groups on error */
+ if (ret)
+ goto hot_reset_release;
+
+ info.count = hdr.count;
+ info.files = files;
+
+ ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
+
+hot_reset_release:
+ for (file_idx--; file_idx >= 0; file_idx--)
+ fput(files[file_idx]);
+
+ kfree(files);
+ return ret;
+}
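+
+/*
+ * Illustrative userspace sketch (not part of this patch) of the two-step
+ * hot reset flow served by the two handlers above, assuming 'device_fd' and
+ * open group fds for every affected group:
+ *
+ *	1. VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, first with a minimal buffer to
+ *	   learn the count (-ENOSPC), then with argsz covering
+ *	   count * sizeof(struct vfio_pci_dependent_device) to learn which
+ *	   {segment, bus, devfn, group_id} tuples are affected.
+ *	2. VFIO_DEVICE_PCI_HOT_RESET with group_fds[] filled with the fds of
+ *	   all affected groups, proving the caller owns every device touched
+ *	   by the slot or bus reset.
+ */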
+
+static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
+ struct vfio_device_ioeventfd __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
+ struct vfio_device_ioeventfd ioeventfd;
+ int count;
+
+ if (copy_from_user(&ioeventfd, arg, minsz))
+ return -EFAULT;
+
+ if (ioeventfd.argsz < minsz)
+ return -EINVAL;
+
+ if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
+ return -EINVAL;
+
+ count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
+
+ if (hweight8(count) != 1 || ioeventfd.fd < -1)
+ return -EINVAL;
+
+ return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count,
+ ioeventfd.fd);
+}
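+
+/*
+ * Illustrative userspace sketch (not part of this patch): offloading a
+ * 32-bit doorbell write to the kernel. 'device_fd', the eventfd 'efd', the
+ * BAR0 region offset 'bar0_offset' and the register constants DOORBELL_REG
+ * and DOORBELL_VAL are all assumed/hypothetical here:
+ *
+ *	struct vfio_device_ioeventfd ioeventfd = {
+ *		.argsz = sizeof(ioeventfd),
+ *		.flags = VFIO_DEVICE_IOEVENTFD_32,
+ *		.offset = bar0_offset + DOORBELL_REG,
+ *		.data = DOORBELL_VAL,
+ *		.fd = efd,
+ *	};
+ *
+ *	ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
+ *
+ * Signaling 'efd' then performs the write in the kernel; passing fd = -1
+ * with the same offset/data/size tears the ioeventfd down again.
+ */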
+
+long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ void __user *uarg = (void __user *)arg;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ return vfio_pci_ioctl_get_info(vdev, uarg);
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ return vfio_pci_ioctl_get_irq_info(vdev, uarg);
+ case VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
+ return vfio_pci_ioctl_get_pci_hot_reset_info(vdev, uarg);
+ case VFIO_DEVICE_GET_REGION_INFO:
+ return vfio_pci_ioctl_get_region_info(vdev, uarg);
+ case VFIO_DEVICE_IOEVENTFD:
+ return vfio_pci_ioctl_ioeventfd(vdev, uarg);
+ case VFIO_DEVICE_PCI_HOT_RESET:
+ return vfio_pci_ioctl_pci_hot_reset(vdev, uarg);
+ case VFIO_DEVICE_RESET:
+ return vfio_pci_ioctl_reset(vdev, uarg);
+ case VFIO_DEVICE_SET_IRQS:
+ return vfio_pci_ioctl_set_irqs(vdev, uarg);
+ default:
+ return -ENOTTY;
+ }
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
+
+static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
+ uuid_t __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ uuid_t uuid;
+ int ret;
+
+ if (!vdev->vf_token)
+ return -ENOTTY;
+ /*
+ * We do not support GET of the VF Token UUID as this could
+ * expose the token of the previous device user.
+ */
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
+ sizeof(uuid));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&uuid, arg, sizeof(uuid)))
+ return -EFAULT;
+
+ mutex_lock(&vdev->vf_token->lock);
+ uuid_copy(&vdev->vf_token->uuid, &uuid);
+ mutex_unlock(&vdev->vf_token->lock);
+ return 0;
+}
+
+int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ switch (flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY:
+ return vfio_pci_core_pm_entry(device, flags, arg, argsz);
+ case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP:
+ return vfio_pci_core_pm_entry_with_wakeup(device, flags,
+ arg, argsz);
+ case VFIO_DEVICE_FEATURE_LOW_POWER_EXIT:
+ return vfio_pci_core_pm_exit(device, flags, arg, argsz);
+ case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
+ return vfio_pci_core_feature_token(device, flags, arg, argsz);
+ default:
+ return -ENOTTY;
+ }
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature);
+
+static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ int ret;
+
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(&vdev->pdev->dev);
+ if (ret) {
+ pci_info_ratelimited(vdev->pdev, "runtime resume failed %d\n",
+ ret);
+ return -EIO;
+ }
+
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ ret = vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+ break;
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ if (iswrite)
+ ret = -EINVAL;
+ else
+ ret = vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+ break;
+
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ ret = vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+ break;
+
+ case VFIO_PCI_VGA_REGION_INDEX:
+ ret = vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ break;
+
+ default:
+ index -= VFIO_PCI_NUM_REGIONS;
+ ret = vdev->region[index].ops->rw(vdev, buf,
+ count, ppos, iswrite);
+ break;
+ }
+
+ pm_runtime_put(&vdev->pdev->dev);
+ return ret;
+}
+
+ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ if (!count)
+ return 0;
+
+ return vfio_pci_rw(vdev, buf, count, ppos, false);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_read);
+
+ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ if (!count)
+ return 0;
+
+ return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_write);
+
+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
+{
+ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+ /*
+ * Lock ordering:
+ * vma_lock is nested under mmap_lock for vm_ops callback paths.
+ * The memory_lock semaphore is used by both code paths calling
+ * into this function to zap vmas and the vm_ops.fault callback
+ * to protect the memory enable state of the device.
+ *
+ * When zapping vmas we need to maintain the mmap_lock => vma_lock
+ * ordering, which requires using vma_lock to walk vma_list to
+ * acquire an mm, then dropping vma_lock to get the mmap_lock and
+ * reacquiring vma_lock. This logic is derived from similar
+ * requirements in uverbs_user_mmap_disassociate().
+ *
+ * mmap_lock must always be the top-level lock when it is taken.
+ * Therefore we can only hold the memory_lock write lock when
+ * vma_list is empty, as we'd need to take mmap_lock to clear
+ * entries. vma_list can only be guaranteed empty when holding
+ * vma_lock, thus memory_lock is nested under vma_lock.
+ *
+ * This enables the vm_ops.fault callback to acquire vma_lock,
+ * followed by memory_lock read lock, while already holding
+ * mmap_lock without risk of deadlock.
+ */
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock))
+ return 0;
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ while (!list_empty(&vdev->vma_list)) {
+ mmap_vma = list_first_entry(&vdev->vma_list,
+ struct vfio_pci_mmap_vma,
+ vma_next);
+ mm = mmap_vma->vma->vm_mm;
+ if (mmget_not_zero(mm))
+ break;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ mm = NULL;
+ }
+ if (!mm)
+ return 1;
+ mutex_unlock(&vdev->vma_lock);
+
+ if (try) {
+ if (!mmap_read_trylock(mm)) {
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mmap_read_lock(mm);
+ }
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock)) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ list_for_each_entry_safe(mmap_vma, tmp,
+ &vdev->vma_list, vma_next) {
+ struct vm_area_struct *vma = mmap_vma->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ }
+ mutex_unlock(&vdev->vma_lock);
+ mmap_read_unlock(mm);
+ mmput(mm);
+ }
+}
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
+{
+ vfio_pci_zap_and_vma_lock(vdev, false);
+ down_write(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
+{
+ u16 cmd;
+
+ down_write(&vdev->memory_lock);
+ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY))
+ pci_write_config_word(vdev->pdev, PCI_COMMAND,
+ cmd | PCI_COMMAND_MEMORY);
+
+ return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 cmd)
+{
+ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+ up_write(&vdev->memory_lock);
+}
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+ if (!mmap_vma)
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+ return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access and therefore
+ * our vma_list only tracks mappings accessed since last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+ struct vfio_pci_core_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mutex_lock(&vdev->vma_lock);
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma) {
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ break;
+ }
+ }
+ mutex_unlock(&vdev->vma_lock);
+}
+
+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_core_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+
+ mutex_lock(&vdev->vma_lock);
+ down_read(&vdev->memory_lock);
+
+ /*
+ * Memory region cannot be accessed if the low power feature is engaged
+ * or memory access is disabled.
+ */
+ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
+ ret = VM_FAULT_SIGBUS;
+ goto up_out;
+ }
+
+ /*
+ * We populate the whole vma on fault, so we need to test whether
+ * the vma has already been mapped, such as for concurrent faults
+ * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
+ * we ask it to fill the same range again.
+ */
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma)
+ goto up_out;
+ }
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ ret = VM_FAULT_SIGBUS;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ goto up_out;
+ }
+
+ if (__vfio_pci_add_vma(vdev, vma)) {
+ ret = VM_FAULT_OOM;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ }
+
+up_out:
+ up_read(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+ .open = vfio_pci_mmap_open,
+ .close = vfio_pci_mmap_close,
+ .fault = vfio_pci_mmap_fault,
+};
+
+int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned int index;
+ u64 phys_len, req_len, pgoff, req_start;
+ int ret;
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ int regnum = index - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_region *region = vdev->region + regnum;
+
+ if (region->ops && region->ops->mmap &&
+ (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
+ return region->ops->mmap(vdev, region, vma);
+ return -EINVAL;
+ }
+ if (index >= VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+ if (!vdev->bar_mmap_supported[index])
+ return -EINVAL;
+
+ phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
+ req_len = vma->vm_end - vma->vm_start;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
+ if (req_start + req_len > phys_len)
+ return -EINVAL;
+
+ /*
+ * Even though we don't make use of the barmap for the mmap,
+ * we need to request the region and the barmap tracks that.
+ */
+ if (!vdev->barmap[index]) {
+ ret = pci_request_selected_regions(pdev,
+ 1 << index, "vfio-pci");
+ if (ret)
+ return ret;
+
+ vdev->barmap[index] = pci_iomap(pdev, index, 0);
+ if (!vdev->barmap[index]) {
+ pci_release_selected_regions(pdev, 1 << index);
+ return -ENOMEM;
+ }
+ }
+
+ vma->vm_private_data = vdev;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+
+ /*
+ * See remap_pfn_range(), called from vfio_pci_mmap_fault(), but we can't
+ * change vm_flags within the fault handler. Set them now.
+ */
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &vfio_pci_mmap_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);
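+
+/*
+ * Illustrative userspace sketch (not part of this patch): mapping BAR0 for
+ * direct access, assuming 'device_fd' and the vfio_region_info 'info'
+ * returned for VFIO_PCI_BAR0_REGION_INDEX with VFIO_REGION_INFO_FLAG_MMAP
+ * set:
+ *
+ *	void *bar = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, device_fd, info.offset);
+ *
+ * The region index is recovered from vm_pgoff above, and the pages are
+ * faulted in lazily by vfio_pci_mmap_fault() so that memory_lock can revoke
+ * the mapping while the device's memory decode is disabled.
+ */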
+
+void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct pci_dev *pdev = vdev->pdev;
+
+ mutex_lock(&vdev->igate);
+
+ if (vdev->req_trigger) {
+ if (!(count % 10))
+ pci_notice_ratelimited(pdev,
+ "Relaying device request to user (#%u)\n",
+ count);
+ eventfd_signal(vdev->req_trigger, 1);
+ } else if (count == 0) {
+ pci_warn(pdev,
+ "No device request channel registered, blocked until released by user\n");
+ }
+
+ mutex_unlock(&vdev->igate);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_request);
+
+static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
+ bool vf_token, uuid_t *uuid)
+{
+ /*
+ * There's always some degree of trust or collaboration between SR-IOV
+ * PF and VFs, even if just that the PF hosts the SR-IOV capability and
+ * can disrupt VFs with a reset, but often the PF has more explicit
+ * access to deny service to the VF or access data passed through the
+ * VF. We therefore require an opt-in via a shared VF token (UUID) to
+ * represent this trust. This both prevents a VF driver from assuming the
+ * PF driver is a trusted, in-kernel driver and prevents a PF driver from
+ * being replaced by a rogue driver unknown to in-use VF drivers.
+ *
+ * Therefore when presented with a VF, if the PF is a vfio device and
+ * it is bound to the vfio-pci driver, the user needs to provide a VF
+ * token to access the device, in the form of appending a vf_token to
+ * the device name, for example:
+ *
+ * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
+ *
+ * When presented with a PF which has VFs in use, the user must also
+ * provide the current VF token to prove collaboration with existing
+ * VF users. If VFs are not in use, the VF token provided for the PF
+ * device will act to set the VF token.
+ *
+ * If the VF token is provided but unused, an error is generated.
+ */
+ if (vdev->pdev->is_virtfn) {
+ struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
+ bool match;
+
+ if (!pf_vdev) {
+ if (!vf_token)
+ return 0; /* PF is not vfio-pci, no VF token */
+
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, PF not bound to vfio-pci\n");
+ return -EINVAL;
+ }
+
+ if (!vf_token) {
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ mutex_lock(&pf_vdev->vf_token->lock);
+ match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
+ mutex_unlock(&pf_vdev->vf_token->lock);
+
+ if (!match) {
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vdev->vf_token) {
+ mutex_lock(&vdev->vf_token->lock);
+ if (vdev->vf_token->users) {
+ if (!vf_token) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vf_token) {
+ uuid_copy(&vdev->vf_token->uuid, uuid);
+ }
+
+ mutex_unlock(&vdev->vf_token->lock);
+ } else if (vf_token) {
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, not a PF or VF\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define VF_TOKEN_ARG "vf_token="
+
+int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ bool vf_token = false;
+ uuid_t uuid;
+ int ret;
+
+ if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
+ return 0; /* No match */
+
+ if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
+ buf += strlen(pci_name(vdev->pdev));
+
+ if (*buf != ' ')
+ return 0; /* No match: non-whitespace after name */
+
+ while (*buf) {
+ if (*buf == ' ') {
+ buf++;
+ continue;
+ }
+
+ if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
+ strlen(VF_TOKEN_ARG))) {
+ buf += strlen(VF_TOKEN_ARG);
+
+ if (strlen(buf) < UUID_STRING_LEN)
+ return -EINVAL;
+
+ ret = uuid_parse(buf, &uuid);
+ if (ret)
+ return ret;
+
+ vf_token = true;
+ buf += UUID_STRING_LEN;
+ } else {
+ /* Unknown/duplicate option */
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
+ if (ret)
+ return ret;
+
+ return 1; /* Match */
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_match);
+
+static int vfio_pci_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct vfio_pci_core_device *vdev = container_of(nb,
+ struct vfio_pci_core_device, nb);
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *physfn = pci_physfn(pdev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
+ pci_name(pdev));
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ vdev->vdev.ops->name);
+ } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ struct pci_driver *drv = pci_dev_driver(pdev);
+
+ if (drv && drv != pci_dev_driver(vdev->pdev))
+ pci_warn(vdev->pdev,
+ "VF %s bound to driver %s while PF bound to driver %s\n",
+ pci_name(pdev), drv->name,
+ pci_dev_driver(vdev->pdev)->name);
+ }
+
+ return 0;
+}
+
+static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *physfn;
+ int ret;
+
+ if (pdev->is_virtfn) {
+ /*
+ * If this VF was created by our vfio_pci_core_sriov_configure()
+ * then we can find the PF vfio_pci_core_device now, and due to
+ * the locking in pci_disable_sriov() it cannot change until
+ * this VF device driver is removed.
+ */
+ physfn = pci_physfn(vdev->pdev);
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
+ if (cur->pdev == physfn) {
+ vdev->sriov_pf_core_dev = cur;
+ break;
+ }
+ }
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+ return 0;
+ }
+
+ /* Not a SRIOV PF */
+ if (!pdev->is_physfn)
+ return 0;
+
+ vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
+ if (!vdev->vf_token)
+ return -ENOMEM;
+
+ mutex_init(&vdev->vf_token->lock);
+ uuid_gen(&vdev->vf_token->uuid);
+
+ vdev->nb.notifier_call = vfio_pci_bus_notifier;
+ ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
+ if (ret) {
+ kfree(vdev->vf_token);
+ return ret;
+ }
+ return 0;
+}
+
+static void vfio_pci_vf_uninit(struct vfio_pci_core_device *vdev)
+{
+ if (!vdev->vf_token)
+ return;
+
+ bus_unregister_notifier(&pci_bus_type, &vdev->nb);
+ WARN_ON(vdev->vf_token->users);
+ mutex_destroy(&vdev->vf_token->lock);
+ kfree(vdev->vf_token);
+}
+
+static int vfio_pci_vga_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+
+ if (!vfio_pci_is_vga(pdev))
+ return 0;
+
+ ret = aperture_remove_conflicting_pci_devices(pdev, vdev->vdev.ops->name);
+ if (ret)
+ return ret;
+
+ ret = vga_client_register(pdev, vfio_pci_set_decode);
+ if (ret)
+ return ret;
+ vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false));
+ return 0;
+}
+
+static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+
+ if (!vfio_pci_is_vga(pdev))
+ return;
+ vga_client_unregister(pdev);
+ vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+ VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_LEGACY_MEM);
+}
+
+int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ vdev->pdev = to_pci_dev(core_vdev->dev);
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ mutex_init(&vdev->igate);
+ spin_lock_init(&vdev->irqlock);
+ mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
+ INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
+ INIT_LIST_HEAD(&vdev->sriov_pfs_item);
+ init_rwsem(&vdev->memory_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev);
+
+void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ mutex_destroy(&vdev->igate);
+ mutex_destroy(&vdev->ioeventfds_lock);
+ mutex_destroy(&vdev->vma_lock);
+ kfree(vdev->region);
+ kfree(vdev->pm_save);
+ vfio_free_device(core_vdev);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev);
+
+int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* Drivers must set the vfio_pci_core_device to their drvdata */
+ if (WARN_ON(vdev != dev_get_drvdata(dev)))
+ return -EINVAL;
+
+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+ return -EINVAL;
+
+ if (vdev->vdev.mig_ops) {
+ if (!(vdev->vdev.mig_ops->migration_get_state &&
+ vdev->vdev.mig_ops->migration_set_state) ||
+ !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
+ return -EINVAL;
+ }
+
+ if (vdev->vdev.log_ops && !(vdev->vdev.log_ops->log_start &&
+ vdev->vdev.log_ops->log_stop &&
+ vdev->vdev.log_ops->log_read_and_clear))
+ return -EINVAL;
+
+ /*
+ * Prevent binding to PFs with VFs enabled; the VFs might be in use
+ * by the host or other users. We cannot capture the VFs if they
+ * already exist, nor can we track VF users. Disabling SR-IOV here
+ * would initiate removing the VFs, which would unbind the driver,
+ * which is prone to blocking if that VF is also in use by vfio-pci.
+ * Just reject these PFs and let the user sort it out.
+ */
+ if (pci_num_vf(pdev)) {
+ pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
+ return -EBUSY;
+ }
+
+ if (pci_is_root_bus(pdev->bus)) {
+ ret = vfio_assign_device_set(&vdev->vdev, vdev);
+ } else if (!pci_probe_reset_slot(pdev->slot)) {
+ ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
+ } else {
+ /*
+ * If there is no slot reset support for this device, the whole
+ * bus needs to be grouped together to support bus-wide resets.
+ */
+ ret = vfio_assign_device_set(&vdev->vdev, pdev->bus);
+ }
+
+ if (ret)
+ return ret;
+ ret = vfio_pci_vf_init(vdev);
+ if (ret)
+ return ret;
+ ret = vfio_pci_vga_init(vdev);
+ if (ret)
+ goto out_vf;
+
+ vfio_pci_probe_power_state(vdev);
+
+ /*
+ * pci-core sets the device power state to an unknown value at
+ * bootup and after being removed from a driver. The only
+ * transition it allows from this unknown state is to D0, which
+ * typically happens when a driver calls pci_enable_device().
+ * We're not ready to enable the device yet, but we do want to
+ * be able to get to D3. Therefore first do a D0 transition
+ * before enabling runtime PM.
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ dev->driver->pm = &vfio_pci_core_pm_ops;
+ pm_runtime_allow(dev);
+ if (!disable_idle_d3)
+ pm_runtime_put(dev);
+
+ ret = vfio_register_group_dev(&vdev->vdev);
+ if (ret)
+ goto out_power;
+ return 0;
+
+out_power:
+ if (!disable_idle_d3)
+ pm_runtime_get_noresume(dev);
+
+ pm_runtime_forbid(dev);
+out_vf:
+ vfio_pci_vf_uninit(vdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
+
+void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
+{
+ vfio_pci_core_sriov_configure(vdev, 0);
+
+ vfio_unregister_group_dev(&vdev->vdev);
+
+ vfio_pci_vf_uninit(vdev);
+ vfio_pci_vga_uninit(vdev);
+
+ if (!disable_idle_d3)
+ pm_runtime_get_noresume(&vdev->pdev->dev);
+
+ pm_runtime_forbid(&vdev->pdev->dev);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
+
+pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
+
+ mutex_lock(&vdev->igate);
+
+ if (vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+
+ mutex_unlock(&vdev->igate);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
+
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+ int nr_virtfn)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret = 0;
+
+ device_lock_assert(&pdev->dev);
+
+ if (nr_virtfn) {
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ /*
+		 * The thread that adds the vdev to the list is the only thread
+		 * that gets to call pci_enable_sriov(), and we only allow it
+		 * to be called once without going through
+		 * pci_disable_sriov().
+ */
+ if (!list_empty(&vdev->sriov_pfs_item)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+
+ /*
+ * The PF power state should always be higher than the VF power
+ * state. The PF can be in low power state either with runtime
+ * power management (when there is no user) or PCI_PM_CTRL
+ * register write by the user. If PF is in the low power state,
+ * then change the power state to D0 first before enabling
+ * SR-IOV. Also, this function can be called at any time, and
+ * userspace PCI_PM_CTRL write can race against this code path,
+ * so protect the same with 'memory_lock'.
+ */
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ goto out_del;
+
+ down_write(&vdev->memory_lock);
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ ret = pci_enable_sriov(pdev, nr_virtfn);
+ up_write(&vdev->memory_lock);
+ if (ret) {
+ pm_runtime_put(&pdev->dev);
+ goto out_del;
+ }
+ return nr_virtfn;
+ }
+
+ if (pci_num_vf(pdev)) {
+ pci_disable_sriov(pdev);
+ pm_runtime_put(&pdev->dev);
+ }
+
+out_del:
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_del_init(&vdev->sriov_pfs_item);
+out_unlock:
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
+
+const struct pci_error_handlers vfio_pci_core_err_handlers = {
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
+
+static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_group_info *groups)
+{
+ unsigned int i;
+
+ for (i = 0; i < groups->count; i++)
+ if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
+ return true;
+ return false;
+}
+
+static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
+{
+ struct vfio_device_set *dev_set = data;
+ struct vfio_device *cur;
+
+ list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
+ if (cur->dev == &pdev->dev)
+ return 0;
+ return -EBUSY;
+}
+
+/*
+ * vfio-core considers a group to be viable and will create a vfio_device even
+ * if some devices are bound to drivers like pci-stub or pcieport. Here we
+ * require all PCI devices to be inside our dev_set since that ensures they stay
+ * put and that every driver controlling the device can co-ordinate with the
+ * device reset.
+ *
+ * Returns the pci_dev to pass to pci_reset_bus() if every PCI device to be
+ * reset is inside the dev_set, and pci_reset_bus() can succeed. NULL otherwise.
+ */
+static struct pci_dev *
+vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
+{
+ struct pci_dev *pdev;
+
+ lockdep_assert_held(&dev_set->lock);
+
+ /*
+ * By definition all PCI devices in the dev_set share the same PCI
+ * reset, so any pci_dev will have the same outcomes for
+ * pci_probe_reset_*() and pci_reset_bus().
+ */
+ pdev = list_first_entry(&dev_set->device_list,
+ struct vfio_pci_core_device,
+ vdev.dev_set_list)->pdev;
+
+ /* pci_reset_bus() is supported */
+ if (pci_probe_reset_slot(pdev->slot) && pci_probe_reset_bus(pdev->bus))
+ return NULL;
+
+ if (vfio_pci_for_each_slot_or_bus(pdev, vfio_pci_is_device_in_set,
+ dev_set,
+ !pci_probe_reset_slot(pdev->slot)))
+ return NULL;
+ return pdev;
+}
+
+static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
+{
+ struct vfio_pci_core_device *cur;
+ int ret;
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+ ret = pm_runtime_resume_and_get(&cur->pdev->dev);
+ if (ret)
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
+ vdev.dev_set_list)
+ pm_runtime_put(&cur->pdev->dev);
+
+ return ret;
+}
+
+/*
+ * We need to get memory_lock for each device, but devices can share mmap_lock,
+ * therefore we need to zap and hold the vma_lock for each device, and only then
+ * get each memory_lock.
+ */
+static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ struct vfio_pci_group_info *groups)
+{
+ struct vfio_pci_core_device *cur_mem;
+ struct vfio_pci_core_device *cur_vma;
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *pdev;
+ bool is_mem = true;
+ int ret;
+
+ mutex_lock(&dev_set->lock);
+ cur_mem = list_first_entry(&dev_set->device_list,
+ struct vfio_pci_core_device,
+ vdev.dev_set_list);
+
+ pdev = vfio_pci_dev_set_resettable(dev_set);
+ if (!pdev) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /*
+ * Some of the devices in the dev_set can be in the runtime suspended
+ * state. Increment the usage count for all the devices in the dev_set
+ * before reset and decrement the same after reset.
+ */
+ ret = vfio_pci_dev_set_pm_runtime_get(dev_set);
+ if (ret)
+ goto err_unlock;
+
+ list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+ /*
+ * Test whether all the affected devices are contained by the
+ * set of groups provided by the user.
+ */
+ if (!vfio_dev_in_groups(cur_vma, groups)) {
+ ret = -EINVAL;
+ goto err_undo;
+ }
+
+ /*
+		 * Locking multiple devices is prone to deadlock; run away and
+		 * unwind if we hit contention.
+ */
+ if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
+ ret = -EBUSY;
+ goto err_undo;
+ }
+ }
+ cur_vma = NULL;
+
+ list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
+ if (!down_write_trylock(&cur_mem->memory_lock)) {
+ ret = -EBUSY;
+ goto err_undo;
+ }
+ mutex_unlock(&cur_mem->vma_lock);
+ }
+ cur_mem = NULL;
+
+ /*
+	 * pci_reset_bus() will reset all the devices on the bus.
+	 * Some of those devices can be in a non-D0 power state; for them,
+	 * pci_reset_bus() internally sets the power state to D0 without
+	 * vfio driver involvement.
+	 * For devices that have NoSoftRst-, the reset can clear the PCI
+	 * config space without restoring the original state (saved locally
+	 * in 'vdev->pm_save').
+ */
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+ vfio_pci_set_power_state(cur, PCI_D0);
+
+ ret = pci_reset_bus(pdev);
+
+err_undo:
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+ if (cur == cur_mem)
+ is_mem = false;
+ if (cur == cur_vma)
+ break;
+ if (is_mem)
+ up_write(&cur->memory_lock);
+ else
+ mutex_unlock(&cur->vma_lock);
+ }
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+ pm_runtime_put(&cur->pdev->dev);
+err_unlock:
+ mutex_unlock(&dev_set->lock);
+ return ret;
+}
+
+static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
+{
+ struct vfio_pci_core_device *cur;
+ bool needs_reset = false;
+
+ /* No other VFIO device in the set can be open. */
+ if (vfio_device_set_open_count(dev_set) > 1)
+ return false;
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+ needs_reset |= cur->needs_reset;
+ return needs_reset;
+}
+
+/*
+ * If a bus or slot reset is available for the provided dev_set and:
+ * - All of the devices affected by that bus or slot reset are unused
+ * - At least one of the affected devices is marked dirty via
+ * needs_reset (such as by lack of FLR support)
+ * Then attempt to perform that bus or slot reset.
+ */
+static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
+{
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *pdev;
+ bool reset_done = false;
+
+ if (!vfio_pci_dev_set_needs_reset(dev_set))
+ return;
+
+ pdev = vfio_pci_dev_set_resettable(dev_set);
+ if (!pdev)
+ return;
+
+ /*
+ * Some of the devices in the bus can be in the runtime suspended
+ * state. Increment the usage count for all the devices in the dev_set
+ * before reset and decrement the same after reset.
+ */
+ if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set))
+ return;
+
+ if (!pci_reset_bus(pdev))
+ reset_done = true;
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+ if (reset_done)
+ cur->needs_reset = false;
+
+ if (!disable_idle_d3)
+ pm_runtime_put(&cur->pdev->dev);
+ }
+}
+
+void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
+ bool is_disable_idle_d3)
+{
+ nointxmask = is_nointxmask;
+ disable_vga = is_disable_vga;
+ disable_idle_d3 = is_disable_idle_d3;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_set_params);
+
+static void vfio_pci_core_cleanup(void)
+{
+ vfio_pci_uninit_perm_bits();
+}
+
+static int __init vfio_pci_core_init(void)
+{
+ /* Allocate shared config space permission data used by all devices */
+ return vfio_pci_init_perm_bits();
+}
+
+module_init(vfio_pci_core_init);
+module_exit(vfio_pci_core_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
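
For reference, a variant driver consumes the exports above in a probe/remove pair like the sketch below. This is illustrative only: "my_vfio_pci", its PCI ID table entry and its module boilerplate are hypothetical placeholders; the vfio_device_ops table simply wires the callbacks to the vfio-pci-core exports so that vfio_pci_core_register_device()'s drvdata check is satisfied.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio_pci_core.h>

static int my_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static const struct vfio_device_ops my_vfio_pci_ops = {
	.name		= "my-vfio-pci",
	.init		= vfio_pci_core_init_dev,
	.release	= vfio_pci_core_release_dev,
	.open_device	= my_vfio_pci_open_device,
	.close_device	= vfio_pci_core_close_device,
	.ioctl		= vfio_pci_core_ioctl,
	.device_feature	= vfio_pci_core_ioctl_feature,
	.read		= vfio_pci_core_read,
	.write		= vfio_pci_core_write,
	.mmap		= vfio_pci_core_mmap,
	.request	= vfio_pci_core_request,
	.match		= vfio_pci_core_match,
};

static int my_vfio_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct vfio_pci_core_device *vdev;
	int ret;

	/* ops->init runs vfio_pci_core_init_dev() during allocation */
	vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev,
				 &my_vfio_pci_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	/* vfio_pci_core_register_device() insists on this drvdata */
	dev_set_drvdata(&pdev->dev, vdev);
	ret = vfio_pci_core_register_device(vdev);
	if (ret)
		vfio_put_device(&vdev->vdev);
	return ret;
}

static void my_vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);

	vfio_pci_core_unregister_device(vdev);
	vfio_put_device(&vdev->vdev);
}

static const struct pci_device_id my_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(0x1af4, 0x1234) },	/* hypothetical IDs */
	{}
};

static struct pci_driver my_vfio_pci_driver = {
	.name			= "my-vfio-pci",
	.id_table		= my_vfio_pci_table,
	.probe			= my_vfio_pci_probe,
	.remove			= my_vfio_pci_remove,
	.err_handler		= &vfio_pci_core_err_handlers,
	.driver_managed_dma	= true,
};
module_pci_driver(my_vfio_pci_driver);
MODULE_LICENSE("GPL");
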
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
new file mode 100644
index 000000000..5e6ca5926
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO PCI Intel Graphics support
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Register a device specific region through which to provide read-only
+ * access to the Intel IGD opregion. The register defining the opregion
+ * address is also virtualized to prevent user modification.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+#include "vfio_pci_priv.h"
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define OPREGION_SIZE (8 * 1024)
+#define OPREGION_PCI_ADDR 0xfc
+
+#define OPREGION_RVDA 0x3ba
+#define OPREGION_RVDS 0x3c2
+#define OPREGION_VERSION 0x16
+
+struct igd_opregion_vbt {
+ void *opregion;
+ void *vbt_ex;
+};
+
+/**
+ * igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position.
+ * @dst: User buffer ptr to copy to.
+ * @off: Offset into the user buffer @dst. Increased by @bytes on return.
+ * @src: Source buffer to copy from.
+ * @pos: Offset into the OpRegion. Increased by @bytes on return.
+ * @remaining: Bytes remaining to copy. Decreased by @bytes on return.
+ * @bytes: Bytes to copy and adjust @off, @pos and @remaining.
+ *
+ * Copy OpRegion to offset from specific source ptr and shift the offset.
+ *
+ * Return: 0 on success, -EFAULT otherwise.
+ *
+ */
+static inline unsigned long igd_opregion_shift_copy(char __user *dst,
+ loff_t *off,
+ void *src,
+ loff_t *pos,
+ size_t *remaining,
+ size_t bytes)
+{
+ if (copy_to_user(dst + (*off), src, bytes))
+ return -EFAULT;
+
+ *off += bytes;
+ *pos += bytes;
+ *remaining -= bytes;
+
+ return 0;
+}
+
+static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos,
+ bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
+ size_t remaining;
+
+ if (pos >= vdev->region[i].size || iswrite)
+ return -EINVAL;
+
+ count = min_t(size_t, count, vdev->region[i].size - pos);
+ remaining = count;
+
+ /* Copy until OpRegion version */
+ if (remaining && pos < OPREGION_VERSION) {
+ size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ opregionvbt->opregion + pos, &pos,
+ &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy patched (if necessary) OpRegion version */
+ if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
+ size_t bytes = min_t(size_t, remaining,
+ OPREGION_VERSION + sizeof(__le16) - pos);
+ __le16 version = *(__le16 *)(opregionvbt->opregion +
+ OPREGION_VERSION);
+
+ /* Patch to 2.1 if OpRegion 2.0 has extended VBT */
+ if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
+ version = cpu_to_le16(0x0201);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ (u8 *)&version +
+ (pos - OPREGION_VERSION),
+ &pos, &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy until RVDA */
+ if (remaining && pos < OPREGION_RVDA) {
+ size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ opregionvbt->opregion + pos, &pos,
+ &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy modified (if necessary) RVDA */
+ if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
+ size_t bytes = min_t(size_t, remaining,
+ OPREGION_RVDA + sizeof(__le64) - pos);
+ __le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
+ OPREGION_SIZE : 0);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ (u8 *)&rvda + (pos - OPREGION_RVDA),
+ &pos, &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy the rest of OpRegion */
+ if (remaining && pos < OPREGION_SIZE) {
+ size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ opregionvbt->opregion + pos, &pos,
+ &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy extended VBT if exists */
+ if (remaining &&
+ copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
+ remaining))
+ return -EFAULT;
+
+ *ppos += count;
+
+ return count;
+}
+
+static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region)
+{
+ struct igd_opregion_vbt *opregionvbt = region->data;
+
+ if (opregionvbt->vbt_ex)
+ memunmap(opregionvbt->vbt_ex);
+
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
+}
+
+static const struct vfio_pci_regops vfio_pci_igd_regops = {
+ .rw = vfio_pci_igd_rw,
+ .release = vfio_pci_igd_release,
+};
+
+static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
+{
+ __le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
+ u32 addr, size;
+ struct igd_opregion_vbt *opregionvbt;
+ int ret;
+ u16 version;
+
+ ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
+ if (ret)
+ return ret;
+
+ if (!addr || !(~addr))
+ return -ENODEV;
+
+ opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
+ if (!opregionvbt)
+ return -ENOMEM;
+
+ opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
+ if (!opregionvbt->opregion) {
+ kfree(opregionvbt);
+ return -ENOMEM;
+ }
+
+ if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
+ return -EINVAL;
+ }
+
+ size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
+ if (!size) {
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
+ return -EINVAL;
+ }
+
+ size *= 1024; /* In KB */
+
+ /*
+ * OpRegion and VBT:
+	 * When the VBT data doesn't exceed 6KB, it's stored in Mailbox #4.
+	 * When the VBT data exceeds 6KB, Mailbox #4 is no longer large enough
+	 * to hold it, so OpRegion 2.0 introduced the Extended VBT region to
+	 * hold the VBT data, along with RVDA/RVDS to define the extended VBT
+	 * data location and size.
+	 * OpRegion 2.0: RVDA defines the absolute physical address of the
+	 * extended VBT data, RVDS defines the VBT data size.
+	 * OpRegion 2.1 and above: RVDA defines the address of the extended
+	 * VBT data relative to the OpRegion base, RVDS defines the VBT data
+	 * size.
+	 *
+	 * Due to the RVDA definition difference in the OpRegion VBT (also the
+	 * only difference between 2.0 and 2.1), exposing the OpRegion and VBT
+	 * as a contiguous range for OpRegion 2.0 and above makes it possible
+	 * to support the non-contiguous VBT through a single vfio region.
+	 * From the r/w ops' view, only a contiguous VBT after the OpRegion
+	 * with version 2.1+ is exposed, regardless of whether the host
+	 * OpRegion is 2.0 or non-contiguous 2.1+. The r/w ops shift the
+	 * actual offset into the VBT on the fly so that data at the correct
+	 * position is returned to the requester.
+ */
+ version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
+ OPREGION_VERSION));
+ if (version >= 0x0200) {
+ u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
+ OPREGION_RVDA));
+ u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
+ OPREGION_RVDS));
+
+ /* The extended VBT is valid only when RVDA/RVDS are non-zero */
+ if (rvda && rvds) {
+ size += rvds;
+
+ /*
+ * Extended VBT location by RVDA:
+ * Absolute physical addr for 2.0.
+ * Relative addr to OpRegion header for 2.1+.
+ */
+ if (version == 0x0200)
+ addr = rvda;
+ else
+ addr += rvda;
+
+ opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
+ if (!opregionvbt->vbt_ex) {
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ ret = vfio_pci_core_register_dev_region(vdev,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
+ size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
+ if (ret) {
+ if (opregionvbt->vbt_ex)
+ memunmap(opregionvbt->vbt_ex);
+
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
+ return ret;
+ }
+
+ /* Fill vconfig with the hw value and virtualize register */
+ *dwordp = cpu_to_le32(addr);
+ memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
+ PCI_CAP_ID_INVALID_VIRT, 4);
+
+ return ret;
+}
+
+static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos,
+ bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ struct pci_dev *pdev = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ size_t size;
+ int ret;
+
+ if (pos >= vdev->region[i].size || iswrite)
+ return -EINVAL;
+
+ size = count = min(count, (size_t)(vdev->region[i].size - pos));
+
+ if ((pos & 1) && size) {
+ u8 val;
+
+ ret = pci_user_read_config_byte(pdev, pos, &val);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf + count - size, &val, 1))
+ return -EFAULT;
+
+ pos++;
+ size--;
+ }
+
+ if ((pos & 3) && size > 2) {
+ u16 val;
+ __le16 lval;
+
+ ret = pci_user_read_config_word(pdev, pos, &val);
+ if (ret)
+ return ret;
+
+ lval = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &lval, 2))
+ return -EFAULT;
+
+ pos += 2;
+ size -= 2;
+ }
+
+ while (size > 3) {
+ u32 val;
+ __le32 lval;
+
+ ret = pci_user_read_config_dword(pdev, pos, &val);
+ if (ret)
+ return ret;
+
+ lval = cpu_to_le32(val);
+ if (copy_to_user(buf + count - size, &lval, 4))
+ return -EFAULT;
+
+ pos += 4;
+ size -= 4;
+ }
+
+ while (size >= 2) {
+ u16 val;
+ __le16 lval;
+
+ ret = pci_user_read_config_word(pdev, pos, &val);
+ if (ret)
+ return ret;
+
+ lval = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &lval, 2))
+ return -EFAULT;
+
+ pos += 2;
+ size -= 2;
+ }
+
+ while (size) {
+ u8 val;
+
+ ret = pci_user_read_config_byte(pdev, pos, &val);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf + count - size, &val, 1))
+ return -EFAULT;
+
+ pos++;
+ size--;
+ }
+
+ *ppos += count;
+
+ return count;
+}
+
+static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_region *region)
+{
+ struct pci_dev *pdev = region->data;
+
+ pci_dev_put(pdev);
+}
+
+static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
+ .rw = vfio_pci_igd_cfg_rw,
+ .release = vfio_pci_igd_cfg_release,
+};
+
+static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *host_bridge, *lpc_bridge;
+ int ret;
+
+ host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!host_bridge)
+ return -ENODEV;
+
+ if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
+ host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
+ pci_dev_put(host_bridge);
+ return -EINVAL;
+ }
+
+ ret = vfio_pci_core_register_dev_region(vdev,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
+ &vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
+ VFIO_REGION_INFO_FLAG_READ, host_bridge);
+ if (ret) {
+ pci_dev_put(host_bridge);
+ return ret;
+ }
+
+ lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
+ if (!lpc_bridge)
+ return -ENODEV;
+
+ if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
+ lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
+ pci_dev_put(lpc_bridge);
+ return -EINVAL;
+ }
+
+ ret = vfio_pci_core_register_dev_region(vdev,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
+ &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
+ VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
+ if (ret) {
+ pci_dev_put(lpc_bridge);
+ return ret;
+ }
+
+ return 0;
+}
+
+int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
+{
+ int ret;
+
+ ret = vfio_pci_igd_opregion_init(vdev);
+ if (ret)
+ return ret;
+
+ ret = vfio_pci_igd_cfg_init(vdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
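
From userspace, the OpRegion registered above shows up as an extra read-only device-specific region. Below is a minimal sketch of reading its 16-byte signature; it assumes device_fd is an already-open VFIO device fd and that opregion_index has been discovered beforehand by walking the VFIO_REGION_INFO_CAP_TYPE capability chain for the Intel vendor type/subtype registered above.

#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>
#include <err.h>

static void igd_check_opregion(int device_fd, __u32 opregion_index)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = opregion_index,	/* assumed discovered already */
	};
	char sig[16];

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
		err(1, "VFIO_DEVICE_GET_REGION_INFO");

	/* The region is read-only; writes fail with -EINVAL in vfio_pci_igd_rw() */
	if (pread(device_fd, sig, sizeof(sig), info.offset) != sizeof(sig))
		err(1, "pread");

	if (memcmp(sig, "IntelGraphicsMem", sizeof(sig)))
		warnx("unexpected OpRegion signature");
}
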
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
new file mode 100644
index 000000000..40c3d7cf1
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO PCI interrupt handling
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/eventfd.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/file.h>
+#include <linux/vfio.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#include "vfio_pci_priv.h"
+
+struct vfio_pci_irq_ctx {
+ struct eventfd_ctx *trigger;
+ struct virqfd *unmask;
+ struct virqfd *mask;
+ char *name;
+ bool masked;
+ struct irq_bypass_producer producer;
+};
+
+static bool irq_is(struct vfio_pci_core_device *vdev, int type)
+{
+ return vdev->irq_type == type;
+}
+
+static bool is_intx(struct vfio_pci_core_device *vdev)
+{
+ return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
+}
+
+static bool is_irq_none(struct vfio_pci_core_device *vdev)
+{
+ return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
+ vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
+ vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
+}
+
+/*
+ * INTx
+ */
+static void vfio_send_intx_eventfd(void *opaque, void *unused)
+{
+ struct vfio_pci_core_device *vdev = opaque;
+
+ if (likely(is_intx(vdev) && !vdev->virq_disabled))
+ eventfd_signal(vdev->ctx[0].trigger, 1);
+}
+
+/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long flags;
+ bool masked_changed = false;
+
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ /*
+ * Masking can come from interrupt, ioctl, or config space
+ * via INTx disable. The latter means this can get called
+ * even when not using intx delivery. In this case, just
+ * try to have the physical bit follow the virtual bit.
+ */
+ if (unlikely(!is_intx(vdev))) {
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 0);
+ } else if (!vdev->ctx[0].masked) {
+ /*
+ * Can't use check_and_mask here because we always want to
+ * mask, not just when something is pending.
+ */
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 0);
+ else
+ disable_irq_nosync(pdev->irq);
+
+ vdev->ctx[0].masked = true;
+ masked_changed = true;
+ }
+
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+ return masked_changed;
+}
+
+/*
+ * If this is triggered by an eventfd, we can't call eventfd_signal
+ * or else we'll deadlock on the eventfd wait queue. Return >0 when
+ * a signal is necessary, which can then be handled via a work queue
+ * or directly depending on the caller.
+ */
+static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+{
+ struct vfio_pci_core_device *vdev = opaque;
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ /*
+ * Unmasking comes from ioctl or config, so again, have the
+ * physical bit follow the virtual even when not using INTx.
+ */
+ if (unlikely(!is_intx(vdev))) {
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 1);
+ } else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
+ /*
+ * A pending interrupt here would immediately trigger,
+ * but we can avoid that overhead by just re-sending
+ * the interrupt to the user.
+ */
+ if (vdev->pci_2_3) {
+ if (!pci_check_and_unmask_intx(pdev))
+ ret = 1;
+ } else
+ enable_irq(pdev->irq);
+
+ vdev->ctx[0].masked = (ret > 0);
+ }
+
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ return ret;
+}
+
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+{
+ if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
+ vfio_send_intx_eventfd(vdev, NULL);
+}
+
+static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+{
+ struct vfio_pci_core_device *vdev = dev_id;
+ unsigned long flags;
+ int ret = IRQ_NONE;
+
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ if (!vdev->pci_2_3) {
+ disable_irq_nosync(vdev->pdev->irq);
+ vdev->ctx[0].masked = true;
+ ret = IRQ_HANDLED;
+ } else if (!vdev->ctx[0].masked && /* may be shared */
+ pci_check_and_mask_intx(vdev->pdev)) {
+ vdev->ctx[0].masked = true;
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ if (ret == IRQ_HANDLED)
+ vfio_send_intx_eventfd(vdev, NULL);
+
+ return ret;
+}
+
+static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
+{
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+ if (!vdev->pdev->irq)
+ return -ENODEV;
+
+ vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
+ vdev->num_ctx = 1;
+
+ /*
+ * If the virtual interrupt is masked, restore it. Devices
+ * supporting DisINTx can be masked at the hardware level
+ * here, non-PCI-2.3 devices will have to wait until the
+ * interrupt is enabled.
+ */
+ vdev->ctx[0].masked = vdev->virq_disabled;
+ if (vdev->pci_2_3)
+ pci_intx(vdev->pdev, !vdev->ctx[0].masked);
+
+ vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+
+ return 0;
+}
+
+static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long irqflags = IRQF_SHARED;
+ struct eventfd_ctx *trigger;
+ unsigned long flags;
+ int ret;
+
+ if (vdev->ctx[0].trigger) {
+ free_irq(pdev->irq, vdev);
+ kfree(vdev->ctx[0].name);
+ eventfd_ctx_put(vdev->ctx[0].trigger);
+ vdev->ctx[0].trigger = NULL;
+ }
+
+ if (fd < 0) /* Disable only */
+ return 0;
+
+ vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
+ pci_name(pdev));
+ if (!vdev->ctx[0].name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ kfree(vdev->ctx[0].name);
+ return PTR_ERR(trigger);
+ }
+
+ vdev->ctx[0].trigger = trigger;
+
+ if (!vdev->pci_2_3)
+ irqflags = 0;
+
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, vdev->ctx[0].name, vdev);
+ if (ret) {
+ vdev->ctx[0].trigger = NULL;
+ kfree(vdev->ctx[0].name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+
+ /*
+ * INTx disable will stick across the new irq setup,
+ * disable_irq won't.
+ */
+ spin_lock_irqsave(&vdev->irqlock, flags);
+ if (!vdev->pci_2_3 && vdev->ctx[0].masked)
+ disable_irq_nosync(pdev->irq);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ return 0;
+}
+
+static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
+{
+ vfio_virqfd_disable(&vdev->ctx[0].unmask);
+ vfio_virqfd_disable(&vdev->ctx[0].mask);
+ vfio_intx_set_signal(vdev, -1);
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->num_ctx = 0;
+ kfree(vdev->ctx);
+}
+
+/*
+ * MSI/MSI-X
+ */
+static irqreturn_t vfio_msihandler(int irq, void *arg)
+{
+ struct eventfd_ctx *trigger = arg;
+
+ eventfd_signal(trigger, 1);
+ return IRQ_HANDLED;
+}
+
+static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
+ int ret;
+ u16 cmd;
+
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
+ /* return the number of supported vectors if we can't get all: */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ kfree(vdev->ctx);
+ return ret;
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ vdev->num_ctx = nvec;
+ vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
+ VFIO_PCI_MSI_IRQ_INDEX;
+
+ if (!msix) {
+ /*
+ * Compute the virtual hardware field for max msi vectors -
+ * it is the log base 2 of the number of vectors.
+ */
+ vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
+ }
+
+ return 0;
+}
+
+static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ int vector, int fd, bool msix)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct eventfd_ctx *trigger;
+ int irq, ret;
+ u16 cmd;
+
+ if (vector < 0 || vector >= vdev->num_ctx)
+ return -EINVAL;
+
+ irq = pci_irq_vector(pdev, vector);
+
+ if (vdev->ctx[vector].trigger) {
+ irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ free_irq(irq, vdev->ctx[vector].trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ kfree(vdev->ctx[vector].name);
+ eventfd_ctx_put(vdev->ctx[vector].trigger);
+ vdev->ctx[vector].trigger = NULL;
+ }
+
+ if (fd < 0)
+ return 0;
+
+ vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
+ msix ? "x" : "", vector,
+ pci_name(pdev));
+ if (!vdev->ctx[vector].name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ kfree(vdev->ctx[vector].name);
+ return PTR_ERR(trigger);
+ }
+
+ /*
+	 * The MSI-X vector table resides in device memory, which may be
+	 * cleared via backdoor resets. We don't allow direct access to the
+	 * vector table, so even if a userspace driver attempts to save and
+	 * restore around such a reset it would be unsuccessful. To avoid
+	 * this, restore the cached value of the message prior to enabling.
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ if (msix) {
+ struct msi_msg msg;
+
+ get_cached_msi_msg(irq, &msg);
+ pci_write_msi_msg(irq, &msg);
+ }
+
+ ret = request_irq(irq, vfio_msihandler, 0,
+ vdev->ctx[vector].name, trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ if (ret) {
+ kfree(vdev->ctx[vector].name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+
+ vdev->ctx[vector].producer.token = trigger;
+ vdev->ctx[vector].producer.irq = irq;
+ ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
+ if (unlikely(ret)) {
+ dev_info(&pdev->dev,
+ "irq bypass producer (token %p) registration fails: %d\n",
+ vdev->ctx[vector].producer.token, ret);
+
+ vdev->ctx[vector].producer.token = NULL;
+ }
+ vdev->ctx[vector].trigger = trigger;
+
+ return 0;
+}
+
+static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ unsigned count, int32_t *fds, bool msix)
+{
+ int i, j, ret = 0;
+
+ if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
+ return -EINVAL;
+
+ for (i = 0, j = start; i < count && !ret; i++, j++) {
+ int fd = fds ? fds[i] : -1;
+ ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
+ }
+
+ if (ret) {
+ for (--j; j >= (int)start; j--)
+ vfio_msi_set_vector_signal(vdev, j, -1, msix);
+ }
+
+ return ret;
+}
+
+static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int i;
+ u16 cmd;
+
+ for (i = 0; i < vdev->num_ctx; i++) {
+ vfio_virqfd_disable(&vdev->ctx[i].unmask);
+ vfio_virqfd_disable(&vdev->ctx[i].mask);
+ }
+
+ vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ /*
+ * Both disable paths above use pci_intx_for_msi() to clear DisINTx
+ * via their shutdown paths. Restore for NoINTx devices.
+ */
+ if (vdev->nointx)
+ pci_intx(pdev, 0);
+
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->num_ctx = 0;
+ kfree(vdev->ctx);
+}
+
+/*
+ * IOCTL support
+ */
+static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (!is_intx(vdev) || start != 0 || count != 1)
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t unmask = *(uint8_t *)data;
+ if (unmask)
+ vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+ if (fd >= 0)
+ return vfio_virqfd_enable((void *) vdev,
+ vfio_pci_intx_unmask_handler,
+ vfio_send_intx_eventfd, NULL,
+ &vdev->ctx[0].unmask, fd);
+
+ vfio_virqfd_disable(&vdev->ctx[0].unmask);
+ }
+
+ return 0;
+}
+
+static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (!is_intx(vdev) || start != 0 || count != 1)
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t mask = *(uint8_t *)data;
+ if (mask)
+ vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ return -ENOTTY; /* XXX implement me */
+ }
+
+ return 0;
+}
+
+static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
+ vfio_intx_disable(vdev);
+ return 0;
+ }
+
+ if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+ int ret;
+
+ if (is_intx(vdev))
+ return vfio_intx_set_signal(vdev, fd);
+
+ ret = vfio_intx_enable(vdev);
+ if (ret)
+ return ret;
+
+ ret = vfio_intx_set_signal(vdev, fd);
+ if (ret)
+ vfio_intx_disable(vdev);
+
+ return ret;
+ }
+
+ if (!is_intx(vdev))
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_send_intx_eventfd(vdev, NULL);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t trigger = *(uint8_t *)data;
+ if (trigger)
+ vfio_send_intx_eventfd(vdev, NULL);
+ }
+ return 0;
+}
+
+static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ int i;
+	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);
+
+ if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
+ vfio_msi_disable(vdev, msix);
+ return 0;
+ }
+
+ if (!(irq_is(vdev, index) || is_irq_none(vdev)))
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t *fds = data;
+ int ret;
+
+ if (vdev->irq_type == index)
+ return vfio_msi_set_block(vdev, start, count,
+ fds, msix);
+
+ ret = vfio_msi_enable(vdev, start + count, msix);
+ if (ret)
+ return ret;
+
+ ret = vfio_msi_set_block(vdev, start, count, fds, msix);
+ if (ret)
+ vfio_msi_disable(vdev, msix);
+
+ return ret;
+ }
+
+ if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
+ return -EINVAL;
+
+ for (i = start; i < start + count; i++) {
+ if (!vdev->ctx[i].trigger)
+ continue;
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ eventfd_signal(vdev->ctx[i].trigger, 1);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t *bools = data;
+ if (bools[i - start])
+ eventfd_signal(vdev->ctx[i].trigger, 1);
+ }
+ }
+ return 0;
+}
+
+static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
+ unsigned int count, uint32_t flags,
+ void *data)
+{
+ /* DATA_NONE/DATA_BOOL enables loopback testing */
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ if (*ctx) {
+ if (count) {
+ eventfd_signal(*ctx, 1);
+ } else {
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ }
+ return 0;
+ }
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t trigger;
+
+ if (!count)
+ return -EINVAL;
+
+ trigger = *(uint8_t *)data;
+ if (trigger && *ctx)
+ eventfd_signal(*ctx, 1);
+
+ return 0;
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd;
+
+ if (!count)
+ return -EINVAL;
+
+ fd = *(int32_t *)data;
+ if (fd == -1) {
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+
+ *ctx = efdctx;
+ }
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
+ return -EINVAL;
+
+ return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
+ count, flags, data);
+}
+
+static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
+ return -EINVAL;
+
+ return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
+ count, flags, data);
+}
+
+int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
+ unsigned index, unsigned start, unsigned count,
+ void *data)
+{
+ int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
+ unsigned start, unsigned count, uint32_t flags,
+ void *data) = NULL;
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = vfio_pci_set_intx_mask;
+ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ func = vfio_pci_set_intx_unmask;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = vfio_pci_set_intx_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ /* XXX Need masking support exported */
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = vfio_pci_set_msi_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ if (pci_is_pcie(vdev->pdev))
+ func = vfio_pci_set_err_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = vfio_pci_set_req_trigger;
+ break;
+ }
+ break;
+ }
+
+ if (!func)
+ return -ENOTTY;
+
+ return func(vdev, index, start, count, flags, data);
+}
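
The userspace side of this path drives VFIO_DEVICE_SET_IRQS with an eventfd payload; the kernel then wires that eventfd into request_irq() as shown in vfio_msi_set_vector_signal() above. A minimal sketch that routes MSI vector 0 to an eventfd, assuming device_fd is an already-open VFIO device fd:

#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>
#include <err.h>

static int enable_msi_vector0(int device_fd)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int efd = eventfd(0, EFD_CLOEXEC);
	int32_t fd;

	if (efd < 0)
		err(1, "eventfd");
	fd = efd;

	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_MSI_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &fd, sizeof(fd));

	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set))
		err(1, "VFIO_DEVICE_SET_IRQS");

	return efd;	/* read()ing this fd blocks until vector 0 fires */
}
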
diff --git a/drivers/vfio/pci/vfio_pci_priv.h b/drivers/vfio/pci/vfio_pci_priv.h
new file mode 100644
index 000000000..5e4fa69ae
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_priv.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef VFIO_PCI_PRIV_H
+#define VFIO_PCI_PRIV_H
+
+#include <linux/vfio_pci_core.h>
+
+/* Special capability IDs predefined access */
+#define PCI_CAP_ID_INVALID 0xFF /* default raw access */
+#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */
+
+/* Cap maximum number of ioeventfds per device (arbitrary) */
+#define VFIO_PCI_IOEVENTFD_MAX 1000
+
+struct vfio_pci_ioeventfd {
+ struct list_head next;
+ struct vfio_pci_core_device *vdev;
+ struct virqfd *virqfd;
+ void __iomem *addr;
+ uint64_t data;
+ loff_t pos;
+ int bar;
+ int count;
+ bool test_mem;
+};
+
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);
+
+int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
+ unsigned index, unsigned start, unsigned count,
+ void *data);
+
+ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+#ifdef CONFIG_VFIO_PCI_VGA
+ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+#else
+static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite)
+{
+ return -EINVAL;
+}
+#endif
+
+int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ uint64_t data, int count, int fd);
+
+int vfio_pci_init_perm_bits(void);
+void vfio_pci_uninit_perm_bits(void);
+
+int vfio_config_init(struct vfio_pci_core_device *vdev);
+void vfio_config_free(struct vfio_pci_core_device *vdev);
+
+int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
+ pci_power_t state);
+
+bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev);
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
+ u16 cmd);
+
+#ifdef CONFIG_VFIO_PCI_IGD
+int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
+#else
+static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
+int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps);
+int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev);
+void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev);
+#else
+static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ return -ENODEV;
+}
+
+static inline int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
+{
+ return 0;
+}
+
+static inline void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
+{}
+#endif
+
+static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
+{
+ return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+}
+
+#endif
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
new file mode 100644
index 000000000..e352a033b
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO PCI I/O Port & MMIO access
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/vfio.h>
+#include <linux/vgaarb.h>
+
+#include "vfio_pci_priv.h"
+
+#ifdef __LITTLE_ENDIAN
+#define vfio_ioread64 ioread64
+#define vfio_iowrite64 iowrite64
+#define vfio_ioread32 ioread32
+#define vfio_iowrite32 iowrite32
+#define vfio_ioread16 ioread16
+#define vfio_iowrite16 iowrite16
+#else
+#define vfio_ioread64 ioread64be
+#define vfio_iowrite64 iowrite64be
+#define vfio_ioread32 ioread32be
+#define vfio_iowrite32 iowrite32be
+#define vfio_ioread16 ioread16be
+#define vfio_iowrite16 iowrite16be
+#endif
+#define vfio_ioread8 ioread8
+#define vfio_iowrite8 iowrite8
+
+#define VFIO_IOWRITE(size) \
+static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size val, void __iomem *io) \
+{ \
+ if (test_mem) { \
+ down_read(&vdev->memory_lock); \
+ if (!__vfio_pci_memory_enabled(vdev)) { \
+ up_read(&vdev->memory_lock); \
+ return -EIO; \
+ } \
+ } \
+ \
+ vfio_iowrite##size(val, io); \
+ \
+ if (test_mem) \
+ up_read(&vdev->memory_lock); \
+ \
+ return 0; \
+}
+
+VFIO_IOWRITE(8)
+VFIO_IOWRITE(16)
+VFIO_IOWRITE(32)
+#ifdef iowrite64
+VFIO_IOWRITE(64)
+#endif
+
+#define VFIO_IOREAD(size) \
+static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size *val, void __iomem *io) \
+{ \
+ if (test_mem) { \
+ down_read(&vdev->memory_lock); \
+ if (!__vfio_pci_memory_enabled(vdev)) { \
+ up_read(&vdev->memory_lock); \
+ return -EIO; \
+ } \
+ } \
+ \
+ *val = vfio_ioread##size(io); \
+ \
+ if (test_mem) \
+ up_read(&vdev->memory_lock); \
+ \
+ return 0; \
+}
+
+VFIO_IOREAD(8)
+VFIO_IOREAD(16)
+VFIO_IOREAD(32)
+
+/*
+ * Read or write from an __iomem region (MMIO or I/O port) with an excluded
+ * range which is inaccessible. The excluded range drops writes and fills
+ * reads with -1. This is intended for handling MSI-X vector tables and
+ * leftover space for ROM BARs.
+ */
+static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
+ void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+ size_t x_end, bool iswrite)
+{
+ ssize_t done = 0;
+ int ret;
+
+ while (count) {
+ size_t fillable, filled;
+
+ if (off < x_start)
+ fillable = min(count, (size_t)(x_start - off));
+ else if (off >= x_end)
+ fillable = count;
+ else
+ fillable = 0;
+
+ if (fillable >= 4 && !(off % 4)) {
+ u32 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 4))
+ return -EFAULT;
+
+ ret = vfio_pci_iowrite32(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
+ } else {
+ ret = vfio_pci_ioread32(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &val, 4))
+ return -EFAULT;
+ }
+
+ filled = 4;
+ } else if (fillable >= 2 && !(off % 2)) {
+ u16 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 2))
+ return -EFAULT;
+
+ ret = vfio_pci_iowrite16(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
+ } else {
+ ret = vfio_pci_ioread16(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &val, 2))
+ return -EFAULT;
+ }
+
+ filled = 2;
+ } else if (fillable) {
+ u8 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 1))
+ return -EFAULT;
+
+ ret = vfio_pci_iowrite8(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
+ } else {
+ ret = vfio_pci_ioread8(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &val, 1))
+ return -EFAULT;
+ }
+
+ filled = 1;
+ } else {
+ /* Fill reads with -1, drop writes */
+ filled = min(count, (size_t)(x_end - off));
+ if (!iswrite) {
+ u8 val = 0xFF;
+ size_t i;
+
+ for (i = 0; i < filled; i++)
+ if (copy_to_user(buf + i, &val, 1))
+ return -EFAULT;
+ }
+ }
+
+ count -= filled;
+ done += filled;
+ off += filled;
+ buf += filled;
+ }
+
+ return done;
+}
+
+static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+ void __iomem *io;
+
+ if (vdev->barmap[bar])
+ return 0;
+
+ ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+ if (ret)
+ return ret;
+
+ io = pci_iomap(pdev, bar, 0);
+ if (!io) {
+ pci_release_selected_regions(pdev, 1 << bar);
+ return -ENOMEM;
+ }
+
+ vdev->barmap[bar] = io;
+
+ return 0;
+}
+
+ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ size_t x_start = 0, x_end = 0;
+ resource_size_t end;
+ void __iomem *io;
+ struct resource *res = &vdev->pdev->resource[bar];
+ ssize_t done;
+
+ if (pci_resource_start(pdev, bar))
+ end = pci_resource_len(pdev, bar);
+ else if (bar == PCI_ROM_RESOURCE &&
+ pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
+ end = 0x20000;
+ else
+ return -EINVAL;
+
+ if (pos >= end)
+ return -EINVAL;
+
+ count = min(count, (size_t)(end - pos));
+
+ if (bar == PCI_ROM_RESOURCE) {
+ /*
+ * The ROM can fill less space than the BAR, so we start the
+ * excluded range at the end of the actual ROM. This makes
+ * filling large ROM BARs much faster.
+ */
+ io = pci_map_rom(pdev, &x_start);
+ if (!io) {
+ done = -ENOMEM;
+ goto out;
+ }
+ x_end = end;
+ } else {
+ int ret = vfio_pci_setup_barmap(vdev, bar);
+ if (ret) {
+ done = ret;
+ goto out;
+ }
+
+ io = vdev->barmap[bar];
+ }
+
+ if (bar == vdev->msix_bar) {
+ x_start = vdev->msix_offset;
+ x_end = vdev->msix_offset + vdev->msix_size;
+ }
+
+ done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
+ count, x_start, x_end, iswrite);
+
+ if (done >= 0)
+ *ppos += done;
+
+ if (bar == PCI_ROM_RESOURCE)
+ pci_unmap_rom(pdev, io);
+out:
+ return done;
+}
+
+#ifdef CONFIG_VFIO_PCI_VGA
+ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ void __iomem *iomem = NULL;
+ unsigned int rsrc;
+ bool is_ioport;
+ ssize_t done;
+
+ if (!vdev->has_vga)
+ return -EINVAL;
+
+ if (pos > 0xbfffful)
+ return -EINVAL;
+
+ switch ((u32)pos) {
+ case 0xa0000 ... 0xbffff:
+ count = min(count, (size_t)(0xc0000 - pos));
+ iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1);
+ off = pos - 0xa0000;
+ rsrc = VGA_RSRC_LEGACY_MEM;
+ is_ioport = false;
+ break;
+ case 0x3b0 ... 0x3bb:
+ count = min(count, (size_t)(0x3bc - pos));
+ iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
+ off = pos - 0x3b0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ case 0x3c0 ... 0x3df:
+ count = min(count, (size_t)(0x3e0 - pos));
+ iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
+ off = pos - 0x3c0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!iomem)
+ return -ENOMEM;
+
+ ret = vga_get_interruptible(vdev->pdev, rsrc);
+ if (ret) {
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
+ return ret;
+ }
+
+ /*
+ * VGA MMIO is a legacy, non-BAR resource that hopefully allows
+ * probing, so we don't currently worry about access in relation
+ * to the memory enable bit in the command register.
+ */
+ done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);
+
+ vga_put(vdev->pdev, rsrc);
+
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
+
+ if (done >= 0)
+ *ppos += done;
+
+ return done;
+}
+#endif
+
+static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
+ bool test_mem)
+{
+ switch (ioeventfd->count) {
+ case 1:
+ vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
+ break;
+ case 2:
+ vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
+ break;
+ case 4:
+ vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
+ break;
+#ifdef iowrite64
+ case 8:
+ vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
+ break;
+#endif
+ }
+}
+
+static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+{
+ struct vfio_pci_ioeventfd *ioeventfd = opaque;
+ struct vfio_pci_core_device *vdev = ioeventfd->vdev;
+
+ if (ioeventfd->test_mem) {
+ if (!down_read_trylock(&vdev->memory_lock))
+ return 1; /* Lock contended, use thread */
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ up_read(&vdev->memory_lock);
+ return 0;
+ }
+ }
+
+ vfio_pci_ioeventfd_do_write(ioeventfd, false);
+
+ if (ioeventfd->test_mem)
+ up_read(&vdev->memory_lock);
+
+ return 0;
+}
+
+static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
+{
+ struct vfio_pci_ioeventfd *ioeventfd = opaque;
+
+ vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
+}
+
+int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ uint64_t data, int count, int fd)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
+ int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset);
+ struct vfio_pci_ioeventfd *ioeventfd;
+
+ /* Only support ioeventfds into BARs */
+ if (bar > VFIO_PCI_BAR5_REGION_INDEX)
+ return -EINVAL;
+
+ if (pos + count > pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ /* Disallow ioeventfds working around MSI-X table writes */
+ if (bar == vdev->msix_bar &&
+ !(pos + count <= vdev->msix_offset ||
+ pos >= vdev->msix_offset + vdev->msix_size))
+ return -EINVAL;
+
+#ifndef iowrite64
+ if (count == 8)
+ return -EINVAL;
+#endif
+
+ ret = vfio_pci_setup_barmap(vdev, bar);
+ if (ret)
+ return ret;
+
+ mutex_lock(&vdev->ioeventfds_lock);
+
+ list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) {
+ if (ioeventfd->pos == pos && ioeventfd->bar == bar &&
+ ioeventfd->data == data && ioeventfd->count == count) {
+ if (fd == -1) {
+ vfio_virqfd_disable(&ioeventfd->virqfd);
+ list_del(&ioeventfd->next);
+ vdev->ioeventfds_nr--;
+ kfree(ioeventfd);
+ ret = 0;
+ } else
+ ret = -EEXIST;
+
+ goto out_unlock;
+ }
+ }
+
+ if (fd < 0) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+ ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
+ if (!ioeventfd) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ ioeventfd->vdev = vdev;
+ ioeventfd->addr = vdev->barmap[bar] + pos;
+ ioeventfd->data = data;
+ ioeventfd->pos = pos;
+ ioeventfd->bar = bar;
+ ioeventfd->count = count;
+ ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
+
+ ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
+ vfio_pci_ioeventfd_thread, NULL,
+ &ioeventfd->virqfd, fd);
+ if (ret) {
+ kfree(ioeventfd);
+ goto out_unlock;
+ }
+
+ list_add(&ioeventfd->next, &vdev->ioeventfds_list);
+ vdev->ioeventfds_nr++;
+
+out_unlock:
+ mutex_unlock(&vdev->ioeventfds_lock);
+
+ return ret;
+}
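
The ioeventfd itself is registered from userspace with the VFIO_DEVICE_IOEVENTFD ioctl. A minimal sketch, assuming device_fd is an open VFIO device fd, bar0_offset is the BAR 0 offset reported by VFIO_DEVICE_GET_REGION_INFO, and DOORBELL_REG names a hypothetical 4-byte register; once registered, signalling the eventfd performs the write in vfio_pci_ioeventfd_handler() above instead of trapping through vfio_pci_bar_rw().

#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <err.h>

#define DOORBELL_REG	0x40	/* hypothetical register offset within BAR 0 */

static int setup_doorbell_ioeventfd(int device_fd, __u64 bar0_offset)
{
	struct vfio_device_ioeventfd ioeventfd;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		err(1, "eventfd");

	ioeventfd.argsz = sizeof(ioeventfd);
	ioeventfd.flags = VFIO_DEVICE_IOEVENTFD_32;	/* 4-byte write */
	ioeventfd.offset = bar0_offset + DOORBELL_REG;
	ioeventfd.data = 0x1;
	ioeventfd.fd = efd;	/* pass -1 later to tear the ioeventfd down */

	if (ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd))
		err(1, "VFIO_DEVICE_IOEVENTFD");

	return efd;	/* eventfd_write(efd, 1) now triggers the BAR write */
}
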
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
new file mode 100644
index 000000000..0990fdb14
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO ZPCI devices support
+ *
+ * Copyright (C) IBM Corp. 2020. All rights reserved.
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ * Matthew Rosato <mjrosato@linux.ibm.com>
+ */
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/vfio_zdev.h>
+#include <linux/kvm_host.h>
+#include <asm/pci_clp.h>
+#include <asm/pci_io.h>
+
+#include "vfio_pci_priv.h"
+
+/*
+ * Add the Base PCI Function information to the device info region.
+ */
+static int zpci_base_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_base cap = {
+ .header.id = VFIO_DEVICE_INFO_CAP_ZPCI_BASE,
+ .header.version = 2,
+ .start_dma = zdev->start_dma,
+ .end_dma = zdev->end_dma,
+ .pchid = zdev->pchid,
+ .vfn = zdev->vfn,
+ .fmb_length = zdev->fmb_length,
+ .pft = zdev->pft,
+ .gid = zdev->pfgid,
+ .fh = zdev->fh
+ };
+
+ return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+}
+
+/*
+ * Add the Base PCI Function Group information to the device info region.
+ */
+static int zpci_group_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_group cap = {
+ .header.id = VFIO_DEVICE_INFO_CAP_ZPCI_GROUP,
+ .header.version = 2,
+ .dasm = zdev->dma_mask,
+ .msi_addr = zdev->msi_addr,
+ .flags = VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH,
+ .mui = zdev->fmb_update,
+ .noi = zdev->max_msi,
+ .maxstbl = ZPCI_MAX_WRITE_SIZE,
+ .version = zdev->version,
+ .reserved = 0,
+ .imaxstbl = zdev->maxstbl
+ };
+
+ return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+}
+
+/*
+ * Add the device utility string to the device info region.
+ */
+static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_util *cap;
+ int cap_size = sizeof(*cap) + CLP_UTIL_STR_LEN;
+ int ret;
+
+ cap = kmalloc(cap_size, GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
+
+ cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_UTIL;
+ cap->header.version = 1;
+ cap->size = CLP_UTIL_STR_LEN;
+ memcpy(cap->util_str, zdev->util_str, cap->size);
+
+ ret = vfio_info_add_capability(caps, &cap->header, cap_size);
+
+ kfree(cap);
+
+ return ret;
+}
+
+/*
+ * Add the function path string to the device info region.
+ */
+static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_pfip *cap;
+ int cap_size = sizeof(*cap) + CLP_PFIP_NR_SEGMENTS;
+ int ret;
+
+ cap = kmalloc(cap_size, GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
+
+ cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_PFIP;
+ cap->header.version = 1;
+ cap->size = CLP_PFIP_NR_SEGMENTS;
+ memcpy(cap->pfip, zdev->pfip, cap->size);
+
+ ret = vfio_info_add_capability(caps, &cap->header, cap_size);
+
+ kfree(cap);
+
+ return ret;
+}
+
+/*
+ * Add all supported capabilities to the VFIO_DEVICE_GET_INFO capability chain.
+ */
+int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct zpci_dev *zdev = to_zpci(vdev->pdev);
+ int ret;
+
+ if (!zdev)
+ return -ENODEV;
+
+ ret = zpci_base_cap(zdev, caps);
+ if (ret)
+ return ret;
+
+ ret = zpci_group_cap(zdev, caps);
+ if (ret)
+ return ret;
+
+ if (zdev->util_str_avail) {
+ ret = zpci_util_cap(zdev, caps);
+ if (ret)
+ return ret;
+ }
+
+ ret = zpci_pfip_cap(zdev, caps);
+
+ return ret;
+}
+
+int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
+{
+ struct zpci_dev *zdev = to_zpci(vdev->pdev);
+
+ if (!zdev)
+ return -ENODEV;
+
+ if (!vdev->vdev.kvm)
+ return 0;
+
+ if (zpci_kvm_hook.kvm_register)
+ return zpci_kvm_hook.kvm_register(zdev, vdev->vdev.kvm);
+
+ return -ENOENT;
+}
+
+void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
+{
+ struct zpci_dev *zdev = to_zpci(vdev->pdev);
+
+ if (!zdev || !vdev->vdev.kvm)
+ return;
+
+ if (zpci_kvm_hook.kvm_unregister)
+ zpci_kvm_hook.kvm_unregister(zdev);
+}
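
The capabilities assembled above are returned through the VFIO_DEVICE_GET_INFO capability chain. A minimal userspace sketch that walks the chain and prints the zPCI base capability, assuming an s390 host where these capabilities are populated and device_fd is an already-open VFIO device fd:

#include <linux/vfio.h>
#include <linux/vfio_zdev.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>

static void dump_zpci_base(int device_fd)
{
	struct vfio_device_info hdr = { .argsz = sizeof(hdr) };
	struct vfio_device_info *info;
	__u32 off;

	/* First call reports the argsz needed to hold the capability chain */
	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &hdr))
		err(1, "VFIO_DEVICE_GET_INFO");

	info = calloc(1, hdr.argsz);
	if (!info)
		err(1, "calloc");
	info->argsz = hdr.argsz;

	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, info))
		err(1, "VFIO_DEVICE_GET_INFO");

	if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS))
		errx(1, "no capability chain (not a zPCI device?)");

	for (off = info->cap_offset; off; ) {
		struct vfio_info_cap_header *cap =
			(struct vfio_info_cap_header *)((char *)info + off);

		if (cap->id == VFIO_DEVICE_INFO_CAP_ZPCI_BASE) {
			struct vfio_device_info_cap_zpci_base *base =
				(struct vfio_device_info_cap_zpci_base *)cap;

			printf("zPCI DMA window %#llx-%#llx, pchid %#x\n",
			       (unsigned long long)base->start_dma,
			       (unsigned long long)base->end_dma, base->pchid);
			break;
		}
		off = cap->next;
	}
	free(info);
}
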