Diffstat (limited to 'drivers/vfio/pci')
-rw-r--r-- | drivers/vfio/pci/Kconfig            |   41
-rw-r--r-- | drivers/vfio/pci/Makefile           |    5
-rw-r--r-- | drivers/vfio/pci/vfio_pci.c         | 1829
-rw-r--r-- | drivers/vfio/pci/vfio_pci_config.c  | 1908
-rw-r--r-- | drivers/vfio/pci/vfio_pci_igd.c     |  280
-rw-r--r-- | drivers/vfio/pci/vfio_pci_intrs.c   |  696
-rw-r--r-- | drivers/vfio/pci/vfio_pci_private.h |  176
-rw-r--r-- | drivers/vfio/pci/vfio_pci_rdwr.c    |  404
8 files changed, 5339 insertions, 0 deletions
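
The regions this driver registers (config space, BARs, ROM, VGA) are consumed from userspace through the standard VFIO device ioctls implemented in vfio_pci.c below. For orientation only, here is a minimal userspace sketch of reading the emulated config space; it uses the existing VFIO uAPI from <linux/vfio.h>, it is not part of the patch, and the device_fd and read_vendor_id names are hypothetical (the group/container setup that yields the device fd is assumed to have happened already).

/*
 * Illustrative only -- not part of this patch.  Reads the PCI vendor ID
 * through the config-space region that vfio_pci_ioctl()/vfio_pci_rw()
 * below expose.  "device_fd" is assumed to come from the usual VFIO
 * group/container setup (VFIO_GROUP_GET_DEVICE_FD).
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int read_vendor_id(int device_fd, uint16_t *vendor)
{
	struct vfio_region_info reg = {
		.argsz = sizeof(reg),
		.index = VFIO_PCI_CONFIG_REGION_INDEX,
	};

	/* Served by the VFIO_DEVICE_GET_REGION_INFO branch of vfio_pci_ioctl() */
	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg) < 0)
		return -1;

	/* Reads/writes at reg.offset land in vfio_pci_rw() -> vfio_pci_config_rw() */
	if (pread(device_fd, vendor, sizeof(*vendor), reg.offset) != sizeof(*vendor))
		return -1;

	return 0;
}

Each region's file offset is derived from its index via VFIO_PCI_INDEX_TO_OFFSET() (see vfio_pci_private.h), so userspace should always use the reported reg.offset rather than assuming a fixed layout.
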
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig new file mode 100644 index 000000000..fcbfd0aac --- /dev/null +++ b/drivers/vfio/pci/Kconfig @@ -0,0 +1,41 @@ +config VFIO_PCI + tristate "VFIO support for PCI devices" + depends on VFIO && PCI && EVENTFD + depends on MMU + select VFIO_VIRQFD + select IRQ_BYPASS_MANAGER + help + Support for the PCI VFIO bus driver. This is required to make + use of PCI drivers using the VFIO framework. + + If you don't know what to do here, say N. + +config VFIO_PCI_VGA + bool "VFIO PCI support for VGA devices" + depends on VFIO_PCI && X86 && VGA_ARB + help + Support for VGA extension to VFIO PCI. This exposes an additional + region on VGA devices for accessing legacy VGA addresses used by + BIOS and generic video drivers. + + If you don't know what to do here, say N. + +config VFIO_PCI_MMAP + depends on VFIO_PCI + def_bool y if !S390 + +config VFIO_PCI_INTX + depends on VFIO_PCI + def_bool y if !S390 + +config VFIO_PCI_IGD + bool "VFIO PCI extensions for Intel graphics (GVT-d)" + depends on VFIO_PCI && X86 + default y + help + Support for Intel IGD specific extensions to enable direct + assignment to virtual machines. This includes exposing an IGD + specific firmware table and read-only copies of the host bridge + and LPC bridge config space. + + To enable Intel IGD assignment through vfio-pci, say Y. diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile new file mode 100644 index 000000000..76d8ec058 --- /dev/null +++ b/drivers/vfio/pci/Makefile @@ -0,0 +1,5 @@ + +vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o +vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o + +obj-$(CONFIG_VFIO_PCI) += vfio-pci.o diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c new file mode 100644 index 000000000..51b791c75 --- /dev/null +++ b/drivers/vfio/pci/vfio_pci.c @@ -0,0 +1,1829 @@ +/* + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/eventfd.h> +#include <linux/file.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/vgaarb.h> +#include <linux/nospec.h> +#include <linux/sched/mm.h> + +#include "vfio_pci_private.h" + +#define DRIVER_VERSION "0.2" +#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" +#define DRIVER_DESC "VFIO PCI - User Level meta-driver" + +static char ids[1024] __initdata; +module_param_string(ids, ids, sizeof(ids), 0); +MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified"); + +static bool nointxmask; +module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(nointxmask, + "Disable support for PCI 2.3 style INTx masking. 
If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag."); + +#ifdef CONFIG_VFIO_PCI_VGA +static bool disable_vga; +module_param(disable_vga, bool, S_IRUGO); +MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci"); +#endif + +static bool disable_idle_d3; +module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(disable_idle_d3, + "Disable using the PCI D3 low power state for idle, unused devices"); + +static DEFINE_MUTEX(driver_lock); + +static inline bool vfio_vga_disabled(void) +{ +#ifdef CONFIG_VFIO_PCI_VGA + return disable_vga; +#else + return true; +#endif +} + +/* + * Our VGA arbiter participation is limited since we don't know anything + * about the device itself. However, if the device is the only VGA device + * downstream of a bridge and VFIO VGA support is disabled, then we can + * safely return legacy VGA IO and memory as not decoded since the user + * has no way to get to it and routing can be disabled externally at the + * bridge. + */ +static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga) +{ + struct vfio_pci_device *vdev = opaque; + struct pci_dev *tmp = NULL, *pdev = vdev->pdev; + unsigned char max_busnr; + unsigned int decodes; + + if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus)) + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM | + VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + + max_busnr = pci_bus_max_busnr(pdev->bus); + decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + + while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) { + if (tmp == pdev || + pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) || + pci_is_root_bus(tmp->bus)) + continue; + + if (tmp->bus->number >= pdev->bus->number && + tmp->bus->number <= max_busnr) { + pci_dev_put(tmp); + decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + break; + } + } + + return decodes; +} + +static inline bool vfio_pci_is_vga(struct pci_dev *pdev) +{ + return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; +} + +static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) +{ + struct resource *res; + int bar; + struct vfio_pci_dummy_resource *dummy_res; + + for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { + res = vdev->pdev->resource + bar; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP)) + goto no_mmap; + + if (!(res->flags & IORESOURCE_MEM)) + goto no_mmap; + + /* + * The PCI core shouldn't set up a resource with a + * type but zero size. But there may be bugs that + * cause us to do that. + */ + if (!resource_size(res)) + goto no_mmap; + + if (resource_size(res) >= PAGE_SIZE) { + vdev->bar_mmap_supported[bar] = true; + continue; + } + + if (!(res->start & ~PAGE_MASK)) { + /* + * Add a dummy resource to reserve the remainder + * of the exclusive page in case that hot-add + * device's bar is assigned into it. 
+ */ + dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL); + if (dummy_res == NULL) + goto no_mmap; + + dummy_res->resource.name = "vfio sub-page reserved"; + dummy_res->resource.start = res->end + 1; + dummy_res->resource.end = res->start + PAGE_SIZE - 1; + dummy_res->resource.flags = res->flags; + if (request_resource(res->parent, + &dummy_res->resource)) { + kfree(dummy_res); + goto no_mmap; + } + dummy_res->index = bar; + list_add(&dummy_res->res_next, + &vdev->dummy_resources_list); + vdev->bar_mmap_supported[bar] = true; + continue; + } + /* + * Here we don't handle the case when the BAR is not page + * aligned because we can't expect the BAR will be + * assigned into the same location in a page in guest + * when we passthrough the BAR. And it's hard to access + * this BAR in userspace because we have no way to get + * the BAR's location in a page. + */ +no_mmap: + vdev->bar_mmap_supported[bar] = false; + } +} + +static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); +static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); + +/* + * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND + * _and_ the ability detect when the device is asserting INTx via PCI_STATUS. + * If a device implements the former but not the latter we would typically + * expect broken_intx_masking be set and require an exclusive interrupt. + * However since we do have control of the device's ability to assert INTx, + * we can instead pretend that the device does not implement INTx, virtualizing + * the pin register to report zero and maintaining DisINTx set on the host. + */ +static bool vfio_pci_nointx(struct pci_dev *pdev) +{ + switch (pdev->vendor) { + case PCI_VENDOR_ID_INTEL: + switch (pdev->device) { + /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */ + case 0x1572: + case 0x1574: + case 0x1580 ... 0x1581: + case 0x1583 ... 0x158b: + case 0x37d0 ... 
0x37d2: + return true; + default: + return false; + } + } + + return false; +} + +static int vfio_pci_enable(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + int ret; + u16 cmd; + u8 msix_pos; + + pci_set_power_state(pdev, PCI_D0); + + /* Don't allow our initial saved state to include busmaster */ + pci_clear_master(pdev); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + /* If reset fails because of the device lock, fail this path entirely */ + ret = pci_try_reset_function(pdev); + if (ret == -EAGAIN) { + pci_disable_device(pdev); + return ret; + } + + vdev->reset_works = !ret; + pci_save_state(pdev); + vdev->pci_saved_state = pci_store_saved_state(pdev); + if (!vdev->pci_saved_state) + pr_debug("%s: Couldn't store %s saved state\n", + __func__, dev_name(&pdev->dev)); + + if (likely(!nointxmask)) { + if (vfio_pci_nointx(pdev)) { + dev_info(&pdev->dev, "Masking broken INTx support\n"); + vdev->nointx = true; + pci_intx(pdev, 0); + } else + vdev->pci_2_3 = pci_intx_mask_supported(pdev); + } + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) { + cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } + + ret = vfio_config_init(vdev); + if (ret) { + kfree(vdev->pci_saved_state); + vdev->pci_saved_state = NULL; + pci_disable_device(pdev); + return ret; + } + + msix_pos = pdev->msix_cap; + if (msix_pos) { + u16 flags; + u32 table; + + pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags); + pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table); + + vdev->msix_bar = table & PCI_MSIX_TABLE_BIR; + vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET; + vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16; + } else + vdev->msix_bar = 0xFF; + + if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev)) + vdev->has_vga = true; + + + if (vfio_pci_is_vga(pdev) && + pdev->vendor == PCI_VENDOR_ID_INTEL && + IS_ENABLED(CONFIG_VFIO_PCI_IGD)) { + ret = vfio_pci_igd_init(vdev); + if (ret) { + dev_warn(&vdev->pdev->dev, + "Failed to setup Intel IGD regions\n"); + vfio_pci_disable(vdev); + return ret; + } + } + + vfio_pci_probe_mmaps(vdev); + + return 0; +} + +static void vfio_pci_disable(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_dummy_resource *dummy_res, *tmp; + struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp; + int i, bar; + + /* Stop the device from further DMA */ + pci_clear_master(pdev); + + vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE | + VFIO_IRQ_SET_ACTION_TRIGGER, + vdev->irq_type, 0, 0, NULL); + + /* Device closed, don't need mutex here */ + list_for_each_entry_safe(ioeventfd, ioeventfd_tmp, + &vdev->ioeventfds_list, next) { + vfio_virqfd_disable(&ioeventfd->virqfd); + list_del(&ioeventfd->next); + kfree(ioeventfd); + } + vdev->ioeventfds_nr = 0; + + vdev->virq_disabled = false; + + for (i = 0; i < vdev->num_regions; i++) + vdev->region[i].ops->release(vdev, &vdev->region[i]); + + vdev->num_regions = 0; + kfree(vdev->region); + vdev->region = NULL; /* don't krealloc a freed pointer */ + + vfio_config_free(vdev); + + for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { + if (!vdev->barmap[bar]) + continue; + pci_iounmap(pdev, vdev->barmap[bar]); + pci_release_selected_regions(pdev, 1 << bar); + vdev->barmap[bar] = NULL; + } + + list_for_each_entry_safe(dummy_res, tmp, + &vdev->dummy_resources_list, res_next) { + list_del(&dummy_res->res_next); + release_resource(&dummy_res->resource); + 
kfree(dummy_res); + } + + vdev->needs_reset = true; + + /* + * If we have saved state, restore it. If we can reset the device, + * even better. Resetting with current state seems better than + * nothing, but saving and restoring current state without reset + * is just busy work. + */ + if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) { + pr_info("%s: Couldn't reload %s saved state\n", + __func__, dev_name(&pdev->dev)); + + if (!vdev->reset_works) + goto out; + + pci_save_state(pdev); + } + + /* + * Disable INTx and MSI, presumably to avoid spurious interrupts + * during reset. Stolen from pci_reset_function() + */ + pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); + + /* + * Try to get the locks ourselves to prevent a deadlock. The + * success of this is dependent on being able to lock the device, + * which is not always possible. + * We can not use the "try" reset interface here, which will + * overwrite the previously restored configuration information. + */ + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) { + if (device_trylock(&pdev->dev)) { + if (!__pci_reset_function_locked(pdev)) + vdev->needs_reset = false; + device_unlock(&pdev->dev); + } + pci_cfg_access_unlock(pdev); + } + + pci_restore_state(pdev); +out: + pci_disable_device(pdev); + + vfio_pci_try_bus_reset(vdev); + + if (!disable_idle_d3) + pci_set_power_state(pdev, PCI_D3hot); +} + +static void vfio_pci_release(void *device_data) +{ + struct vfio_pci_device *vdev = device_data; + + mutex_lock(&driver_lock); + + if (!(--vdev->refcnt)) { + vfio_spapr_pci_eeh_release(vdev->pdev); + vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); + if (vdev->err_trigger) { + eventfd_ctx_put(vdev->err_trigger); + vdev->err_trigger = NULL; + } + mutex_unlock(&vdev->igate); + + mutex_lock(&vdev->igate); + if (vdev->req_trigger) { + eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } + mutex_unlock(&vdev->igate); + } + + mutex_unlock(&driver_lock); + + module_put(THIS_MODULE); +} + +static int vfio_pci_open(void *device_data) +{ + struct vfio_pci_device *vdev = device_data; + int ret = 0; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + mutex_lock(&driver_lock); + + if (!vdev->refcnt) { + ret = vfio_pci_enable(vdev); + if (ret) + goto error; + + vfio_spapr_pci_eeh_open(vdev->pdev); + } + vdev->refcnt++; +error: + mutex_unlock(&driver_lock); + if (ret) + module_put(THIS_MODULE); + return ret; +} + +static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) +{ + if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { + u8 pin; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || + vdev->nointx || vdev->pdev->is_virtfn) + return 0; + + pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); + + return pin ? 
1 : 0; + } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { + u8 pos; + u16 flags; + + pos = vdev->pdev->msi_cap; + if (pos) { + pci_read_config_word(vdev->pdev, + pos + PCI_MSI_FLAGS, &flags); + return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1); + } + } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) { + u8 pos; + u16 flags; + + pos = vdev->pdev->msix_cap; + if (pos) { + pci_read_config_word(vdev->pdev, + pos + PCI_MSIX_FLAGS, &flags); + + return (flags & PCI_MSIX_FLAGS_QSIZE) + 1; + } + } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) { + if (pci_is_pcie(vdev->pdev)) + return 1; + } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) { + return 1; + } + + return 0; +} + +static int vfio_pci_count_devs(struct pci_dev *pdev, void *data) +{ + (*(int *)data)++; + return 0; +} + +struct vfio_pci_fill_info { + int max; + int cur; + struct vfio_pci_dependent_device *devices; +}; + +static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data) +{ + struct vfio_pci_fill_info *fill = data; + struct iommu_group *iommu_group; + + if (fill->cur == fill->max) + return -EAGAIN; /* Something changed, try again */ + + iommu_group = iommu_group_get(&pdev->dev); + if (!iommu_group) + return -EPERM; /* Cannot reset non-isolated devices */ + + fill->devices[fill->cur].group_id = iommu_group_id(iommu_group); + fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus); + fill->devices[fill->cur].bus = pdev->bus->number; + fill->devices[fill->cur].devfn = pdev->devfn; + fill->cur++; + iommu_group_put(iommu_group); + return 0; +} + +struct vfio_pci_group_entry { + struct vfio_group *group; + int id; +}; + +struct vfio_pci_group_info { + int count; + struct vfio_pci_group_entry *groups; +}; + +static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data) +{ + struct vfio_pci_group_info *info = data; + struct iommu_group *group; + int id, i; + + group = iommu_group_get(&pdev->dev); + if (!group) + return -EPERM; + + id = iommu_group_id(group); + + for (i = 0; i < info->count; i++) + if (info->groups[i].id == id) + break; + + iommu_group_put(group); + + return (i == info->count) ? 
-EINVAL : 0; +} + +static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot) +{ + for (; pdev; pdev = pdev->bus->self) + if (pdev->bus == slot->bus) + return (pdev->slot == slot); + return false; +} + +struct vfio_pci_walk_info { + int (*fn)(struct pci_dev *, void *data); + void *data; + struct pci_dev *pdev; + bool slot; + int ret; +}; + +static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data) +{ + struct vfio_pci_walk_info *walk = data; + + if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot)) + walk->ret = walk->fn(pdev, walk->data); + + return walk->ret; +} + +static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev, + int (*fn)(struct pci_dev *, + void *data), void *data, + bool slot) +{ + struct vfio_pci_walk_info walk = { + .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0, + }; + + pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk); + + return walk.ret; +} + +static int msix_mmappable_cap(struct vfio_pci_device *vdev, + struct vfio_info_cap *caps) +{ + struct vfio_info_cap_header header = { + .id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE, + .version = 1 + }; + + return vfio_info_add_capability(caps, &header, sizeof(header)); +} + +int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data) +{ + struct vfio_pci_region *region; + + region = krealloc(vdev->region, + (vdev->num_regions + 1) * sizeof(*region), + GFP_KERNEL); + if (!region) + return -ENOMEM; + + vdev->region = region; + vdev->region[vdev->num_regions].type = type; + vdev->region[vdev->num_regions].subtype = subtype; + vdev->region[vdev->num_regions].ops = ops; + vdev->region[vdev->num_regions].size = size; + vdev->region[vdev->num_regions].flags = flags; + vdev->region[vdev->num_regions].data = data; + + vdev->num_regions++; + + return 0; +} + +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + +static long vfio_pci_ioctl(void *device_data, + unsigned int cmd, unsigned long arg) +{ + struct vfio_pci_device *vdev = device_data; + unsigned long minsz; + + if (cmd == VFIO_DEVICE_GET_INFO) { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + info.flags = VFIO_DEVICE_FLAGS_PCI; + + if (vdev->reset_works) + info.flags |= VFIO_DEVICE_FLAGS_RESET; + + info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions; + info.num_irqs = VFIO_PCI_NUM_IRQS; + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct pci_dev *pdev = vdev->pdev; + struct vfio_region_info info; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + int i, ret; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = pdev->cfg_size; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + break; + case VFIO_PCI_BAR0_REGION_INDEX ... 
VFIO_PCI_BAR5_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = pci_resource_len(pdev, info.index); + if (!info.size) { + info.flags = 0; + break; + } + + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + if (vdev->bar_mmap_supported[info.index]) { + info.flags |= VFIO_REGION_INFO_FLAG_MMAP; + if (info.index == vdev->msix_bar) { + ret = msix_mmappable_cap(vdev, &caps); + if (ret) + return ret; + } + } + + break; + case VFIO_PCI_ROM_REGION_INDEX: + { + void __iomem *io; + size_t size; + u16 cmd; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.flags = 0; + + /* Report the BAR size, not the ROM size */ + info.size = pci_resource_len(pdev, info.index); + if (!info.size) { + /* Shadow ROMs appear as PCI option ROMs */ + if (pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW) + info.size = 0x20000; + else + break; + } + + /* + * Is it really there? Enable memory decode for + * implicit access in pci_map_rom(). + */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + io = pci_map_rom(pdev, &size); + if (io) { + info.flags = VFIO_REGION_INFO_FLAG_READ; + pci_unmap_rom(pdev, io); + } else { + info.size = 0; + } + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + break; + } + case VFIO_PCI_VGA_REGION_INDEX: + if (!vdev->has_vga) + return -EINVAL; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = 0xc0000; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + + break; + default: + { + struct vfio_region_info_cap_type cap_type = { + .header.id = VFIO_REGION_INFO_CAP_TYPE, + .header.version = 1 }; + + if (info.index >= + VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; + info.index = array_index_nospec(info.index, + VFIO_PCI_NUM_REGIONS + + vdev->num_regions); + + i = info.index - VFIO_PCI_NUM_REGIONS; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = vdev->region[i].size; + info.flags = vdev->region[i].flags; + + cap_type.type = vdev->region[i].type; + cap_type.subtype = vdev->region[i].subtype; + + ret = vfio_info_add_capability(&caps, &cap_type.header, + sizeof(cap_type)); + if (ret) + return ret; + + } + } + + if (caps.size) { + info.flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + + kfree(caps.buf); + } + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX: + case VFIO_PCI_REQ_IRQ_INDEX: + break; + case VFIO_PCI_ERR_IRQ_INDEX: + if (pci_is_pcie(vdev->pdev)) + break; + /* fall through */ + default: + return -EINVAL; + } + + info.flags = VFIO_IRQ_INFO_EVENTFD; + + info.count = vfio_pci_get_irq_count(vdev, info.index); + + if (info.index == VFIO_PCI_INTX_IRQ_INDEX) + info.flags |= (VFIO_IRQ_INFO_MASKABLE | + VFIO_IRQ_INFO_AUTOMASKED); + else + info.flags |= VFIO_IRQ_INFO_NORESIZE; + + return copy_to_user((void __user *)arg, &info, minsz) ? 
+ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; + u8 *data = NULL; + int max, ret = 0; + size_t data_size = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + max = vfio_pci_get_irq_count(vdev, hdr.index); + + ret = vfio_set_irqs_validate_and_prepare(&hdr, max, + VFIO_PCI_NUM_IRQS, &data_size); + if (ret) + return ret; + + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), + data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + + mutex_lock(&vdev->igate); + + ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, + hdr.start, hdr.count, data); + + mutex_unlock(&vdev->igate); + kfree(data); + + return ret; + + } else if (cmd == VFIO_DEVICE_RESET) { + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; + + } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { + struct vfio_pci_hot_reset_info hdr; + struct vfio_pci_fill_info fill = { 0 }; + struct vfio_pci_dependent_device *devices = NULL; + bool slot = false; + int ret = 0; + + minsz = offsetofend(struct vfio_pci_hot_reset_info, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + if (hdr.argsz < minsz) + return -EINVAL; + + hdr.flags = 0; + + /* Can we do a slot or bus reset or neither? */ + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return -ENODEV; + + /* How many devices are affected? */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_count_devs, + &fill.max, slot); + if (ret) + return ret; + + WARN_ON(!fill.max); /* Should always be at least one */ + + /* + * If there's enough space, fill it now, otherwise return + * -ENOSPC and the number of devices affected. + */ + if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) { + ret = -ENOSPC; + hdr.count = fill.max; + goto reset_info_exit; + } + + devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL); + if (!devices) + return -ENOMEM; + + fill.devices = devices; + + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_fill_devs, + &fill, slot); + + /* + * If a device was removed between counting and filling, + * we may come up short of fill.max. If a device was + * added, we'll have a return of -EAGAIN above. + */ + if (!ret) + hdr.count = fill.cur; + +reset_info_exit: + if (copy_to_user((void __user *)arg, &hdr, minsz)) + ret = -EFAULT; + + if (!ret) { + if (copy_to_user((void __user *)(arg + minsz), devices, + hdr.count * sizeof(*devices))) + ret = -EFAULT; + } + + kfree(devices); + return ret; + + } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) { + struct vfio_pci_hot_reset hdr; + int32_t *group_fds; + struct vfio_pci_group_entry *groups; + struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; + bool slot = false; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; + + minsz = offsetofend(struct vfio_pci_hot_reset, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + if (hdr.argsz < minsz || hdr.flags) + return -EINVAL; + + /* Can we do a slot or bus reset or neither? 
*/ + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return -ENODEV; + + /* + * We can't let userspace give us an arbitrarily large + * buffer to copy, so verify how many we think there + * could be. Note groups can have multiple devices so + * one group per device is the max. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_count_devs, + &count, slot); + if (ret) + return ret; + + /* Somewhere between 1 and count is OK */ + if (!hdr.count || hdr.count > count) + return -EINVAL; + + group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL); + groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL); + if (!group_fds || !groups) { + kfree(group_fds); + kfree(groups); + return -ENOMEM; + } + + if (copy_from_user(group_fds, (void __user *)(arg + minsz), + hdr.count * sizeof(*group_fds))) { + kfree(group_fds); + kfree(groups); + return -EFAULT; + } + + /* + * For each group_fd, get the group through the vfio external + * user interface and store the group and iommu ID. This + * ensures the group is held across the reset. + */ + for (group_idx = 0; group_idx < hdr.count; group_idx++) { + struct vfio_group *group; + struct fd f = fdget(group_fds[group_idx]); + if (!f.file) { + ret = -EBADF; + break; + } + + group = vfio_group_get_external_user(f.file); + fdput(f); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + break; + } + + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); + } + + kfree(group_fds); + + /* release reference to groups on error */ + if (ret) + goto hot_reset_release; + + info.count = hdr.count; + info.groups = groups; + + /* + * Test whether all the affected devices are contained + * by the set of groups provided by the user. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_validate_devs, + &info, slot); + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. 
+ */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = pci_reset_bus(vdev->pdev); + +hot_reset_release: + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); + + kfree(groups); + return ret; + } else if (cmd == VFIO_DEVICE_IOEVENTFD) { + struct vfio_device_ioeventfd ioeventfd; + int count; + + minsz = offsetofend(struct vfio_device_ioeventfd, fd); + + if (copy_from_user(&ioeventfd, (void __user *)arg, minsz)) + return -EFAULT; + + if (ioeventfd.argsz < minsz) + return -EINVAL; + + if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK) + return -EINVAL; + + count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK; + + if (hweight8(count) != 1 || ioeventfd.fd < -1) + return -EINVAL; + + return vfio_pci_ioeventfd(vdev, ioeventfd.offset, + ioeventfd.data, count, ioeventfd.fd); + } + + return -ENOTTY; +} + +static ssize_t vfio_pci_rw(void *device_data, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct vfio_pci_device *vdev = device_data; + + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; + + switch (index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite); + + case VFIO_PCI_ROM_REGION_INDEX: + if (iswrite) + return -EINVAL; + return vfio_pci_bar_rw(vdev, buf, count, ppos, false); + + case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: + return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite); + + case VFIO_PCI_VGA_REGION_INDEX: + return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite); + default: + index -= VFIO_PCI_NUM_REGIONS; + return vdev->region[index].ops->rw(vdev, buf, + count, ppos, iswrite); + } + + return -EINVAL; +} + +static ssize_t vfio_pci_read(void *device_data, char __user *buf, + size_t count, loff_t *ppos) +{ + if (!count) + return 0; + + return vfio_pci_rw(device_data, buf, count, ppos, false); +} + +static ssize_t vfio_pci_write(void *device_data, const char __user *buf, + size_t count, loff_t *ppos) +{ + if (!count) + return 0; + + return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); +} + +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. + * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. 
+ * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. + * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. + */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + list_add(&mmap_vma->vma_next, &vdev->vma_list); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + vm_fault_t ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + goto up_out; + } + + /* + * We populate the whole vma on fault, so we need to test whether + * the vma has already been mapped, such as for concurrent faults + * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if + * we ask it to fill the same range again. + */ + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) + goto up_out; + } + + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) { + ret = VM_FAULT_SIGBUS; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + } + +up_out: + up_read(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); + return ret; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + +static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = device_data; + struct pci_dev *pdev = vdev->pdev; + unsigned int index; + u64 phys_len, req_len, pgoff, req_start; + int ret; + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + + if (vma->vm_end < vma->vm_start) + return -EINVAL; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + if (index >= VFIO_PCI_ROM_REGION_INDEX) + return -EINVAL; + if (!vdev->bar_mmap_supported[index]) + return -EINVAL; + + phys_len = PAGE_ALIGN(pci_resource_len(pdev, index)); + req_len = vma->vm_end - vma->vm_start; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (req_start + req_len > phys_len) + return -EINVAL; + + /* + * Even though we don't make use of the barmap for the mmap, + * we need to request the region and the barmap tracks that. + */ + if (!vdev->barmap[index]) { + ret = pci_request_selected_regions(pdev, + 1 << index, "vfio-pci"); + if (ret) + return ret; + + vdev->barmap[index] = pci_iomap(pdev, index, 0); + if (!vdev->barmap[index]) { + pci_release_selected_regions(pdev, 1 << index); + return -ENOMEM; + } + } + + vma->vm_private_data = vdev; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; + + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. 
+ */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; +} + +static void vfio_pci_request(void *device_data, unsigned int count) +{ + struct vfio_pci_device *vdev = device_data; + + mutex_lock(&vdev->igate); + + if (vdev->req_trigger) { + if (!(count % 10)) + dev_notice_ratelimited(&vdev->pdev->dev, + "Relaying device request to user (#%u)\n", + count); + eventfd_signal(vdev->req_trigger, 1); + } else if (count == 0) { + dev_warn(&vdev->pdev->dev, + "No device request channel registered, blocked until released by user\n"); + } + + mutex_unlock(&vdev->igate); +} + +static const struct vfio_device_ops vfio_pci_ops = { + .name = "vfio-pci", + .open = vfio_pci_open, + .release = vfio_pci_release, + .ioctl = vfio_pci_ioctl, + .read = vfio_pci_read, + .write = vfio_pci_write, + .mmap = vfio_pci_mmap, + .request = vfio_pci_request, +}; + +static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct vfio_pci_device *vdev; + struct iommu_group *group; + int ret; + + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) + return -EINVAL; + + /* + * Prevent binding to PFs with VFs enabled, this too easily allows + * userspace instance with VFs and PFs from the same device, which + * cannot work. Disabling SR-IOV here would initiate removing the + * VFs, which would unbind the driver, which is prone to blocking + * if that VF is also in use by vfio-pci. Just reject these PFs + * and let the user sort it out. + */ + if (pci_num_vf(pdev)) { + pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n"); + return -EBUSY; + } + + group = vfio_iommu_group_get(&pdev->dev); + if (!group) + return -EINVAL; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + vfio_iommu_group_put(group, &pdev->dev); + return -ENOMEM; + } + + vdev->pdev = pdev; + vdev->irq_type = VFIO_PCI_NUM_IRQS; + mutex_init(&vdev->igate); + spin_lock_init(&vdev->irqlock); + mutex_init(&vdev->ioeventfds_lock); + INIT_LIST_HEAD(&vdev->dummy_resources_list); + INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); + + ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); + if (ret) { + vfio_iommu_group_put(group, &pdev->dev); + kfree(vdev); + return ret; + } + + if (vfio_pci_is_vga(pdev)) { + vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode); + vga_set_legacy_decoding(pdev, + vfio_pci_set_vga_decode(vdev, false)); + } + + if (!disable_idle_d3) { + /* + * pci-core sets the device power state to an unknown value at + * bootup and after being removed from a driver. The only + * transition it allows from this unknown state is to D0, which + * typically happens when a driver calls pci_enable_device(). + * We're not ready to enable the device yet, but we do want to + * be able to get to D3. Therefore first do a D0 transition + * before going to D3. 
+ */ + pci_set_power_state(pdev, PCI_D0); + pci_set_power_state(pdev, PCI_D3hot); + } + + return ret; +} + +static void vfio_pci_remove(struct pci_dev *pdev) +{ + struct vfio_pci_device *vdev; + + vdev = vfio_del_group_dev(&pdev->dev); + if (!vdev) + return; + + vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev); + kfree(vdev->region); + mutex_destroy(&vdev->ioeventfds_lock); + kfree(vdev); + + if (vfio_pci_is_vga(pdev)) { + vga_client_register(pdev, NULL, NULL, NULL); + vga_set_legacy_decoding(pdev, + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM | + VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM); + } + + if (!disable_idle_d3) + pci_set_power_state(pdev, PCI_D0); +} + +static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct vfio_pci_device *vdev; + struct vfio_device *device; + + device = vfio_device_get_from_dev(&pdev->dev); + if (device == NULL) + return PCI_ERS_RESULT_DISCONNECT; + + vdev = vfio_device_data(device); + if (vdev == NULL) { + vfio_device_put(device); + return PCI_ERS_RESULT_DISCONNECT; + } + + mutex_lock(&vdev->igate); + + if (vdev->err_trigger) + eventfd_signal(vdev->err_trigger, 1); + + mutex_unlock(&vdev->igate); + + vfio_device_put(device); + + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static const struct pci_error_handlers vfio_err_handlers = { + .error_detected = vfio_pci_aer_err_detected, +}; + +static struct pci_driver vfio_pci_driver = { + .name = "vfio-pci", + .id_table = NULL, /* only dynamic ids */ + .probe = vfio_pci_probe, + .remove = vfio_pci_remove, + .err_handler = &vfio_err_handlers, +}; + +static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. + */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + +/* + * Attempt to do a bus/slot reset if there are devices affected by a reset for + * this device that are needs_reset and all of the affected devices are unused + * (!refcnt). Callers are required to hold driver_lock when calling this to + * prevent device opens and concurrent bus reset attempts. We prevent device + * unbinds by acquiring and holding a reference to the vfio_device. + * + * NB: vfio-core considers a group to be viable even if some devices are + * bound to drivers like pci-stub or pcieport. Here we require all devices + * to be bound to vfio_pci since that's the only way we can be sure they + * stay put. 
+ */ +static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev) +{ + struct vfio_devices devs = { .cur_index = 0 }; + int i = 0, ret = -EINVAL; + bool needs_reset = false, slot = false; + struct vfio_pci_device *tmp; + + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return; + + if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs, + &i, slot) || !i) + return; + + devs.max_index = i; + devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL); + if (!devs.devices) + return; + + if (vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_get_devs, &devs, slot)) + goto put_devs; + + for (i = 0; i < devs.cur_index; i++) { + tmp = vfio_device_data(devs.devices[i]); + if (tmp->needs_reset) + needs_reset = true; + if (tmp->refcnt) + goto put_devs; + } + + if (needs_reset) + ret = pci_reset_bus(vdev->pdev); + +put_devs: + for (i = 0; i < devs.cur_index; i++) { + tmp = vfio_device_data(devs.devices[i]); + if (!ret) + tmp->needs_reset = false; + + if (!tmp->refcnt && !disable_idle_d3) + pci_set_power_state(tmp->pdev, PCI_D3hot); + + vfio_device_put(devs.devices[i]); + } + + kfree(devs.devices); +} + +static void __exit vfio_pci_cleanup(void) +{ + pci_unregister_driver(&vfio_pci_driver); + vfio_pci_uninit_perm_bits(); +} + +static void __init vfio_pci_fill_ids(void) +{ + char *p, *id; + int rc; + + /* no ids passed actually */ + if (ids[0] == '\0') + return; + + /* add ids specified in the module parameter */ + p = ids; + while ((id = strsep(&p, ","))) { + unsigned int vendor, device, subvendor = PCI_ANY_ID, + subdevice = PCI_ANY_ID, class = 0, class_mask = 0; + int fields; + + if (!strlen(id)) + continue; + + fields = sscanf(id, "%x:%x:%x:%x:%x:%x", + &vendor, &device, &subvendor, &subdevice, + &class, &class_mask); + + if (fields < 2) { + pr_warn("invalid id string \"%s\"\n", id); + continue; + } + + rc = pci_add_dynid(&vfio_pci_driver, vendor, device, + subvendor, subdevice, class, class_mask, 0); + if (rc) + pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n", + vendor, device, subvendor, subdevice, + class, class_mask, rc); + else + pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n", + vendor, device, subvendor, subdevice, + class, class_mask); + } +} + +static int __init vfio_pci_init(void) +{ + int ret; + + /* Allocate shared config space permision data used by all devices */ + ret = vfio_pci_init_perm_bits(); + if (ret) + return ret; + + /* Register and scan for devices */ + ret = pci_register_driver(&vfio_pci_driver); + if (ret) + goto out_driver; + + vfio_pci_fill_ids(); + + return 0; + +out_driver: + vfio_pci_uninit_perm_bits(); + return ret; +} + +module_init(vfio_pci_init); +module_exit(vfio_pci_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c new file mode 100644 index 000000000..86e917f1c --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -0,0 +1,1908 @@ +/* + * VFIO PCI config space virtualization + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. 
All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +/* + * This code handles reading and writing of PCI configuration registers. + * This is hairy because we want to allow a lot of flexibility to the + * user driver, but cannot trust it with all of the config fields. + * Tables determine which fields can be read and written, as well as + * which fields are 'virtualized' - special actions and translations to + * make it appear to the user that he has control, when in fact things + * must be negotiated with the underlying OS. + */ + +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/slab.h> + +#include "vfio_pci_private.h" + +/* Fake capability ID for standard config space */ +#define PCI_CAP_ID_BASIC 0 + +#define is_bar(offset) \ + ((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \ + (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4)) + +/* + * Lengths of PCI Config Capabilities + * 0: Removed from the user visible capability list + * FF: Variable length + */ +static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = { + [PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */ + [PCI_CAP_ID_PM] = PCI_PM_SIZEOF, + [PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF, + [PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF, + [PCI_CAP_ID_SLOTID] = 0, /* bridge - don't care */ + [PCI_CAP_ID_MSI] = 0xFF, /* 10, 14, 20, or 24 */ + [PCI_CAP_ID_CHSWP] = 0, /* cpci - not yet */ + [PCI_CAP_ID_PCIX] = 0xFF, /* 8 or 24 */ + [PCI_CAP_ID_HT] = 0xFF, /* hypertransport */ + [PCI_CAP_ID_VNDR] = 0xFF, /* variable */ + [PCI_CAP_ID_DBG] = 0, /* debug - don't care */ + [PCI_CAP_ID_CCRC] = 0, /* cpci - not yet */ + [PCI_CAP_ID_SHPC] = 0, /* hotswap - not yet */ + [PCI_CAP_ID_SSVID] = 0, /* bridge - don't care */ + [PCI_CAP_ID_AGP3] = 0, /* AGP8x - not yet */ + [PCI_CAP_ID_SECDEV] = 0, /* secure device not yet */ + [PCI_CAP_ID_EXP] = 0xFF, /* 20 or 44 */ + [PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF, + [PCI_CAP_ID_SATA] = 0xFF, + [PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF, +}; + +/* + * Lengths of PCIe/PCI-X Extended Config Capabilities + * 0: Removed or masked from the user visible capability list + * FF: Variable length + */ +static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = { + [PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND, + [PCI_EXT_CAP_ID_VC] = 0xFF, + [PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF, + [PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF, + [PCI_EXT_CAP_ID_RCLD] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_RCILC] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_RCEC] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_MFVC] = 0xFF, + [PCI_EXT_CAP_ID_VC9] = 0xFF, /* same as CAP_ID_VC */ + [PCI_EXT_CAP_ID_RCRB] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_VNDR] = 0xFF, + [PCI_EXT_CAP_ID_CAC] = 0, /* obsolete */ + [PCI_EXT_CAP_ID_ACS] = 0xFF, + [PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF, + [PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF, + [PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF, + [PCI_EXT_CAP_ID_MRIOV] = 0, /* not yet */ + [PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF, + [PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF, + [PCI_EXT_CAP_ID_AMD_XXX] = 0, /* not yet */ + [PCI_EXT_CAP_ID_REBAR] = 0xFF, + [PCI_EXT_CAP_ID_DPA] = 0xFF, + [PCI_EXT_CAP_ID_TPH] = 0xFF, + [PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF, + [PCI_EXT_CAP_ID_SECPCI] = 0, /* not yet */ + [PCI_EXT_CAP_ID_PMUX] = 0, /* not yet */ + [PCI_EXT_CAP_ID_PASID] = 0, /* not yet */ +}; + +/* + * Read/Write Permission Bits - one 
bit for each bit in capability + * Any field can be read if it exists, but what is read depends on + * whether the field is 'virtualized', or just pass thru to the + * hardware. Any virtualized field is also virtualized for writes. + * Writes are only permitted if they have a 1 bit here. + */ +struct perm_bits { + u8 *virt; /* read/write virtual data, not hw */ + u8 *write; /* writeable bits */ + int (*readfn)(struct vfio_pci_device *vdev, int pos, int count, + struct perm_bits *perm, int offset, __le32 *val); + int (*writefn)(struct vfio_pci_device *vdev, int pos, int count, + struct perm_bits *perm, int offset, __le32 val); +}; + +#define NO_VIRT 0 +#define ALL_VIRT 0xFFFFFFFFU +#define NO_WRITE 0 +#define ALL_WRITE 0xFFFFFFFFU + +static int vfio_user_config_read(struct pci_dev *pdev, int offset, + __le32 *val, int count) +{ + int ret = -EINVAL; + u32 tmp_val = 0; + + switch (count) { + case 1: + { + u8 tmp; + ret = pci_user_read_config_byte(pdev, offset, &tmp); + tmp_val = tmp; + break; + } + case 2: + { + u16 tmp; + ret = pci_user_read_config_word(pdev, offset, &tmp); + tmp_val = tmp; + break; + } + case 4: + ret = pci_user_read_config_dword(pdev, offset, &tmp_val); + break; + } + + *val = cpu_to_le32(tmp_val); + + return ret; +} + +static int vfio_user_config_write(struct pci_dev *pdev, int offset, + __le32 val, int count) +{ + int ret = -EINVAL; + u32 tmp_val = le32_to_cpu(val); + + switch (count) { + case 1: + ret = pci_user_write_config_byte(pdev, offset, tmp_val); + break; + case 2: + ret = pci_user_write_config_word(pdev, offset, tmp_val); + break; + case 4: + ret = pci_user_write_config_dword(pdev, offset, tmp_val); + break; + } + + return ret; +} + +static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + __le32 virt = 0; + + memcpy(val, vdev->vconfig + pos, count); + + memcpy(&virt, perm->virt + offset, count); + + /* Any non-virtualized bits? 
*/ + if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) { + struct pci_dev *pdev = vdev->pdev; + __le32 phys_val = 0; + int ret; + + ret = vfio_user_config_read(pdev, pos, &phys_val, count); + if (ret) + return ret; + + *val = (phys_val & ~virt) | (*val & virt); + } + + return count; +} + +static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + __le32 virt = 0, write = 0; + + memcpy(&write, perm->write + offset, count); + + if (!write) + return count; /* drop, no writable bits */ + + memcpy(&virt, perm->virt + offset, count); + + /* Virtualized and writable bits go to vconfig */ + if (write & virt) { + __le32 virt_val = 0; + + memcpy(&virt_val, vdev->vconfig + pos, count); + + virt_val &= ~(write & virt); + virt_val |= (val & (write & virt)); + + memcpy(vdev->vconfig + pos, &virt_val, count); + } + + /* Non-virtualzed and writable bits go to hardware */ + if (write & ~virt) { + struct pci_dev *pdev = vdev->pdev; + __le32 phys_val = 0; + int ret; + + ret = vfio_user_config_read(pdev, pos, &phys_val, count); + if (ret) + return ret; + + phys_val &= ~(write & ~virt); + phys_val |= (val & (write & ~virt)); + + ret = vfio_user_config_write(pdev, pos, phys_val, count); + if (ret) + return ret; + } + + return count; +} + +/* Allow direct read from hardware, except for capability next pointer */ +static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + int ret; + + ret = vfio_user_config_read(vdev->pdev, pos, val, count); + if (ret) + return ret; + + if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */ + if (offset < 4) + memcpy(val, vdev->vconfig + pos, count); + } else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */ + if (offset == PCI_CAP_LIST_ID && count > 1) + memcpy(val, vdev->vconfig + pos, + min(PCI_CAP_FLAGS, count)); + else if (offset == PCI_CAP_LIST_NEXT) + memcpy(val, vdev->vconfig + pos, 1); + } + + return count; +} + +/* Raw access skips any kind of virtualization */ +static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + int ret; + + ret = vfio_user_config_write(vdev->pdev, pos, val, count); + if (ret) + return ret; + + return count; +} + +static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + int ret; + + ret = vfio_user_config_read(vdev->pdev, pos, val, count); + if (ret) + return ret; + + return count; +} + +/* Virt access uses only virtualization */ +static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + memcpy(vdev->vconfig + pos, &val, count); + return count; +} + +static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + memcpy(val, vdev->vconfig + pos, count); + return count; +} + +/* Default capability regions to read-only, no-virtualization */ +static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = { + [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } +}; +static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = { + [0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } +}; +/* + * Default unassigned regions to raw read-write access. 
Some devices + * require this to function as they hide registers between the gaps in + * config space (be2net). Like MMIO and I/O port registers, we have + * to trust the hardware isolation. + */ +static struct perm_bits unassigned_perms = { + .readfn = vfio_raw_config_read, + .writefn = vfio_raw_config_write +}; + +static struct perm_bits virt_perms = { + .readfn = vfio_virt_config_read, + .writefn = vfio_virt_config_write +}; + +static void free_perm_bits(struct perm_bits *perm) +{ + kfree(perm->virt); + kfree(perm->write); + perm->virt = NULL; + perm->write = NULL; +} + +static int alloc_perm_bits(struct perm_bits *perm, int size) +{ + /* + * Round up all permission bits to the next dword, this lets us + * ignore whether a read/write exceeds the defined capability + * structure. We can do this because: + * - Standard config space is already dword aligned + * - Capabilities are all dword aligned (bits 0:1 of next reserved) + * - Express capabilities defined as dword aligned + */ + size = round_up(size, 4); + + /* + * Zero state is + * - All Readable, None Writeable, None Virtualized + */ + perm->virt = kzalloc(size, GFP_KERNEL); + perm->write = kzalloc(size, GFP_KERNEL); + if (!perm->virt || !perm->write) { + free_perm_bits(perm); + return -ENOMEM; + } + + perm->readfn = vfio_default_config_read; + perm->writefn = vfio_default_config_write; + + return 0; +} + +/* + * Helper functions for filling in permission tables + */ +static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write) +{ + p->virt[off] = virt; + p->write[off] = write; +} + +/* Handle endian-ness - pci and tables are little-endian */ +static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write) +{ + *(__le16 *)(&p->virt[off]) = cpu_to_le16(virt); + *(__le16 *)(&p->write[off]) = cpu_to_le16(write); +} + +/* Handle endian-ness - pci and tables are little-endian */ +static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) +{ + *(__le32 *)(&p->virt[off]) = cpu_to_le32(virt); + *(__le32 *)(&p->write[off]) = cpu_to_le32(write); +} + +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); +} + +/* + * Restore the *real* BARs after we detect a FLR or backdoor reset. 
+ * (backdoor = some device specific technique that we didn't catch) + */ +static void vfio_bar_restore(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u32 *rbar = vdev->rbar; + u16 cmd; + int i; + + if (pdev->is_virtfn) + return; + + pr_info("%s: %s reset recovery - restoring bars\n", + __func__, dev_name(&pdev->dev)); + + for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++) + pci_user_write_config_dword(pdev, i, *rbar); + + pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar); + + if (vdev->nointx) { + pci_user_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_INTX_DISABLE; + pci_user_write_config_word(pdev, PCI_COMMAND, cmd); + } +} + +static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar) +{ + unsigned long flags = pci_resource_flags(pdev, bar); + u32 val; + + if (flags & IORESOURCE_IO) + return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO); + + val = PCI_BASE_ADDRESS_SPACE_MEMORY; + + if (flags & IORESOURCE_PREFETCH) + val |= PCI_BASE_ADDRESS_MEM_PREFETCH; + + if (flags & IORESOURCE_MEM_64) + val |= PCI_BASE_ADDRESS_MEM_TYPE_64; + + return cpu_to_le32(val); +} + +/* + * Pretend we're hardware and tweak the values of the *virtual* PCI BARs + * to reflect the hardware capabilities. This implements BAR sizing. + */ +static void vfio_bar_fixup(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + int i; + __le32 *bar; + u64 mask; + + bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0]; + + for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) { + if (!pci_resource_start(pdev, i)) { + *bar = 0; /* Unmapped by host = unimplemented to user */ + continue; + } + + mask = ~(pci_resource_len(pdev, i) - 1); + + *bar &= cpu_to_le32((u32)mask); + *bar |= vfio_generate_bar_flags(pdev, i); + + if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) { + bar++; + *bar &= cpu_to_le32((u32)(mask >> 32)); + i++; + } + } + + bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS]; + + /* + * NB. REGION_INFO will have reported zero size if we weren't able + * to read the ROM, but we still return the actual BAR size here if + * it exists (or the shadow ROM space). 
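 *
 * [Editor's note, not part of the patch: the sizing protocol emulated
 *  here is the usual write-ones-and-read-back sequence, issued by the
 *  user against the config region of the vfio device file descriptor.
 *  A rough sketch for a 32-bit memory BAR, assuming device_fd and
 *  cfg_off (the config region offset reported by
 *  VFIO_DEVICE_GET_REGION_INFO) are already known:
 *
 *    u32 orig, probe, size;
 *    pread(device_fd, &orig, 4, cfg_off + PCI_BASE_ADDRESS_0);
 *    probe = ~0;
 *    pwrite(device_fd, &probe, 4, cfg_off + PCI_BASE_ADDRESS_0);
 *    pread(device_fd, &probe, 4, cfg_off + PCI_BASE_ADDRESS_0);
 *    size = ~(probe & PCI_BASE_ADDRESS_MEM_MASK) + 1;
 *    pwrite(device_fd, &orig, 4, cfg_off + PCI_BASE_ADDRESS_0);
 *
 *  The writes only dirty vconfig (the BARs are fully virtualized), and
 *  the read-back lands in this function, which clamps the value to the
 *  real size mask before ORing the flag bits back in.]
 *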
+ */ + if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) { + mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1); + mask |= PCI_ROM_ADDRESS_ENABLE; + *bar &= cpu_to_le32((u32)mask); + } else if (pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW) { + mask = ~(0x20000 - 1); + mask |= PCI_ROM_ADDRESS_ENABLE; + *bar &= cpu_to_le32((u32)mask); + } else + *bar = 0; + + vdev->bardirty = false; +} + +static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + if (is_bar(offset)) /* pos == offset for basic config */ + vfio_bar_fixup(vdev); + + count = vfio_default_config_read(vdev, pos, count, perm, offset, val); + + /* Mask in virtual memory enable for SR-IOV devices */ + if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) { + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + u32 tmp_val = le32_to_cpu(*val); + + tmp_val |= cmd & PCI_COMMAND_MEMORY; + *val = cpu_to_le32(tmp_val); + } + + return count; +} + +/* Test whether BARs match the value we think they should contain */ +static bool vfio_need_bar_restore(struct vfio_pci_device *vdev) +{ + int i = 0, pos = PCI_BASE_ADDRESS_0, ret; + u32 bar; + + for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) { + if (vdev->rbar[i]) { + ret = pci_user_read_config_dword(vdev->pdev, pos, &bar); + if (ret || vdev->rbar[i] != bar) + return true; + } + } + + return false; +} + +static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + struct pci_dev *pdev = vdev->pdev; + __le16 *virt_cmd; + u16 new_cmd = 0; + int ret; + + virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND]; + + if (offset == PCI_COMMAND) { + bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io; + u16 phys_cmd; + + ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd); + if (ret) + return ret; + + new_cmd = le32_to_cpu(val); + + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); + virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); + new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); + + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); + + /* + * If the user is writing mem/io enable (new_mem/io) and we + * think it's already enabled (virt_mem/io), but the hardware + * shows it disabled (phys_mem/io, then the device has + * undergone some kind of backdoor reset and needs to be + * restored before we allow it to enable the bars. + * SR-IOV devices will trigger this, but we catch them later + */ + if ((new_mem && virt_mem && !phys_mem) || + (new_io && virt_io && !phys_io) || + vfio_need_bar_restore(vdev)) + vfio_bar_restore(vdev); + } + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) { + if (offset == PCI_COMMAND) + up_write(&vdev->memory_lock); + return count; + } + + /* + * Save current memory/io enable bits in vconfig to allow for + * the test above next time. 
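 *
 * [Editor's note, not part of the patch: the "test above" is the
 *  backdoor-reset heuristic, which condenses to the predicate below
 *  (names reused from the locals in this function):
 *
 *    bool lost_state = (new_mem && virt_mem && !phys_mem) ||
 *                      (new_io  && virt_io  && !phys_io)  ||
 *                      vfio_need_bar_restore(vdev);
 *
 *  i.e. the user believes an enable bit is already set and is writing
 *  it again, yet hardware reports it clear, or a BAR no longer matches
 *  its saved value; either way vfio_bar_restore() rewrites the real
 *  BARs before the enable is allowed through.]
 *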
+ */ + if (offset == PCI_COMMAND) { + u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO; + + *virt_cmd &= cpu_to_le16(~mask); + *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); + } + + /* Emulate INTx disable */ + if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) { + bool virt_intx_disable; + + virt_intx_disable = !!(le16_to_cpu(*virt_cmd) & + PCI_COMMAND_INTX_DISABLE); + + if (virt_intx_disable && !vdev->virq_disabled) { + vdev->virq_disabled = true; + vfio_pci_intx_mask(vdev); + } else if (!virt_intx_disable && vdev->virq_disabled) { + vdev->virq_disabled = false; + vfio_pci_intx_unmask(vdev); + } + } + + if (is_bar(offset)) + vdev->bardirty = true; + + return count; +} + +/* Permissions for the Basic PCI Header */ +static int __init init_pci_cap_basic_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF)) + return -ENOMEM; + + perm->readfn = vfio_basic_config_read; + perm->writefn = vfio_basic_config_write; + + /* Virtualized for SR-IOV functions, which just have FFFF */ + p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE); + p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE); + + /* + * Virtualize INTx disable, we use it internally for interrupt + * control and can emulate it for non-PCI 2.3 devices. + */ + p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE); + + /* Virtualize capability list, we might want to skip/disable */ + p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE); + + /* No harm to write */ + p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE); + p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE); + p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE); + + /* Virtualize all bars, can't touch the real ones */ + p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE); + + /* Allow us to adjust capability chain */ + p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE); + + /* Sometimes used by sw, just virtualize */ + p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE); + + /* Virtualize interrupt pin to allow hiding INTx */ + p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE); + + return 0; +} + +static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + if (offset == PCI_PM_CTRL) { + pci_power_t state; + + switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) { + case 0: + state = PCI_D0; + break; + case 1: + state = PCI_D1; + break; + case 2: + state = PCI_D2; + break; + case 3: + state = PCI_D3hot; + break; + } + + pci_set_power_state(vdev->pdev, state); + } + + return count; +} + +/* Permissions for the Power Management capability */ +static int __init init_pci_cap_pm_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM])) + return -ENOMEM; + + perm->writefn = vfio_pm_config_write; + + /* + * We always virtualize the next field so we can remove + * capabilities from the chain if we want to. 
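 *
 * [Editor's note, not part of the patch: "remove capabilities from the
 *  chain" works because the user only ever reads the next pointer from
 *  vconfig. vfio_cap_init() later in this file hides a capability at
 *  'pos' essentially by doing:
 *
 *    vdev->vconfig[prev + PCI_CAP_LIST_NEXT] = next;
 *
 *  where 'prev' is the preceding capability (or PCI_CAPABILITY_LIST for
 *  the head of the list) and 'next' is the value read from hardware, so
 *  the user's walk of the list simply skips the hidden entry.]
 *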
+ */ + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * Power management is defined *per function*, so we can let + * the user change power state, but we trap and initiate the + * change ourselves, so the state bits are read-only. + */ + p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK); + return 0; +} + +static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + struct pci_dev *pdev = vdev->pdev; + __le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR); + __le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA); + u16 addr; + u32 data; + + /* + * Write through to emulation. If the write includes the upper byte + * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we + * have work to do. + */ + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0 || offset > PCI_VPD_ADDR + 1 || + offset + count <= PCI_VPD_ADDR + 1) + return count; + + addr = le16_to_cpu(*paddr); + + if (addr & PCI_VPD_ADDR_F) { + data = le32_to_cpu(*pdata); + if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) + return count; + } else { + data = 0; + if (pci_read_vpd(pdev, addr, 4, &data) < 0) + return count; + *pdata = cpu_to_le32(data); + } + + /* + * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to + * signal completion. If an error occurs above, we assume that not + * toggling this bit will induce a driver timeout. + */ + addr ^= PCI_VPD_ADDR_F; + *paddr = cpu_to_le16(addr); + + return count; +} + +/* Permissions for Vital Product Data capability */ +static int __init init_pci_cap_vpd_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD])) + return -ENOMEM; + + perm->writefn = vfio_vpd_config_write; + + /* + * We always virtualize the next field so we can remove + * capabilities from the chain if we want to. + */ + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * Both the address and data registers are virtualized to + * enable access through the pci_vpd_read/write functions + */ + p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE); + p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE); + + return 0; +} + +/* Permissions for PCI-X capability */ +static int __init init_pci_cap_pcix_perm(struct perm_bits *perm) +{ + /* Alloc 24, but only 8 are used in v0 */ + if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2)) + return -ENOMEM; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE); + p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE); + return 0; +} + +static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + __le16 *ctrl = (__le16 *)(vdev->vconfig + pos - + offset + PCI_EXP_DEVCTL); + int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ; + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + /* + * The FLR bit is virtualized, if set and the device supports PCIe + * FLR, issue a reset_function. Regardless, clear the bit, the spec + * requires it to be always read as zero. NB, reset_function might + * not use a PCIe FLR, we don't have that level of granularity. 
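 *
 * [Editor's note, not part of the patch: a user-side sketch of what
 *  this enables (requesting an FLR through the emulated register),
 *  assuming device_fd, cfg_off and exp_pos (the offset of the express
 *  capability) are already known:
 *
 *    u16 devctl;
 *    pread(device_fd, &devctl, 2, cfg_off + exp_pos + PCI_EXP_DEVCTL);
 *    devctl |= PCI_EXP_DEVCTL_BCR_FLR;
 *    pwrite(device_fd, &devctl, 2, cfg_off + exp_pos + PCI_EXP_DEVCTL);
 *
 *  The bit is consumed just below and always reads back as zero.]
 *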
+ */ + if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) { + u32 cap; + int ret; + + *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR); + + ret = pci_user_read_config_dword(vdev->pdev, + pos - offset + PCI_EXP_DEVCAP, + &cap); + + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); + pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } + } + + /* + * MPS is virtualized to the user, writes do not change the physical + * register since determining a proper MPS value requires a system wide + * device view. The MRRS is largely independent of MPS, but since the + * user does not have that system-wide view, they might set a safe, but + * inefficiently low value. Here we allow writes through to hardware, + * but we set the floor to the physical device MPS setting, so that + * we can at least use full TLPs, as defined by the MPS value. + * + * NB, if any devices actually depend on an artificially low MRRS + * setting, this will need to be revisited, perhaps with a quirk + * though pcie_set_readrq(). + */ + if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) { + readrq = 128 << + ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12); + readrq = max(readrq, pcie_get_mps(vdev->pdev)); + + pcie_set_readrq(vdev->pdev, readrq); + } + + return count; +} + +/* Permissions for PCI Express capability */ +static int __init init_pci_cap_exp_perm(struct perm_bits *perm) +{ + /* Alloc largest of possible sizes */ + if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2)) + return -ENOMEM; + + perm->writefn = vfio_exp_config_write; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * Allow writes to device control fields, except devctl_phantom, + * which could confuse IOMMU, MPS, which can break communication + * with other physical devices, and the ARI bit in devctl2, which + * is set at probe time. FLR and MRRS get virtualized via our + * writefn. + */ + p_setw(perm, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD | + PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM); + p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); + return 0; +} + +static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL; + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + /* + * The FLR bit is virtualized, if set and the device supports AF + * FLR, issue a reset_function. Regardless, clear the bit, the spec + * requires it to be always read as zero. NB, reset_function might + * not use an AF FLR, we don't have that level of granularity. 
+ */ + if (*ctrl & PCI_AF_CTRL_FLR) { + u8 cap; + int ret; + + *ctrl &= ~PCI_AF_CTRL_FLR; + + ret = pci_user_read_config_byte(vdev->pdev, + pos - offset + PCI_AF_CAP, + &cap); + + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); + pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } + } + + return count; +} + +/* Permissions for Advanced Function capability */ +static int __init init_pci_cap_af_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF])) + return -ENOMEM; + + perm->writefn = vfio_af_config_write; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR); + return 0; +} + +/* Permissions for Advanced Error Reporting extended capability */ +static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm) +{ + u32 mask; + + if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR])) + return -ENOMEM; + + /* + * Virtualize the first dword of all express capabilities + * because it includes the next pointer. This lets us later + * remove capabilities from the chain if we need to. + */ + p_setd(perm, 0, ALL_VIRT, NO_WRITE); + + /* Writable bits mask */ + mask = PCI_ERR_UNC_UND | /* Undefined */ + PCI_ERR_UNC_DLP | /* Data Link Protocol */ + PCI_ERR_UNC_SURPDN | /* Surprise Down */ + PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */ + PCI_ERR_UNC_FCP | /* Flow Control Protocol */ + PCI_ERR_UNC_COMP_TIME | /* Completion Timeout */ + PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */ + PCI_ERR_UNC_UNX_COMP | /* Unexpected Completion */ + PCI_ERR_UNC_RX_OVER | /* Receiver Overflow */ + PCI_ERR_UNC_MALF_TLP | /* Malformed TLP */ + PCI_ERR_UNC_ECRC | /* ECRC Error Status */ + PCI_ERR_UNC_UNSUP | /* Unsupported Request */ + PCI_ERR_UNC_ACSV | /* ACS Violation */ + PCI_ERR_UNC_INTN | /* internal error */ + PCI_ERR_UNC_MCBTLP | /* MC blocked TLP */ + PCI_ERR_UNC_ATOMEG | /* Atomic egress blocked */ + PCI_ERR_UNC_TLPPRE; /* TLP prefix blocked */ + p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask); + p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask); + p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask); + + mask = PCI_ERR_COR_RCVR | /* Receiver Error Status */ + PCI_ERR_COR_BAD_TLP | /* Bad TLP Status */ + PCI_ERR_COR_BAD_DLLP | /* Bad DLLP Status */ + PCI_ERR_COR_REP_ROLL | /* REPLAY_NUM Rollover */ + PCI_ERR_COR_REP_TIMER | /* Replay Timer Timeout */ + PCI_ERR_COR_ADV_NFAT | /* Advisory Non-Fatal */ + PCI_ERR_COR_INTERNAL | /* Corrected Internal */ + PCI_ERR_COR_LOG_OVER; /* Header Log Overflow */ + p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask); + p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask); + + mask = PCI_ERR_CAP_ECRC_GENE | /* ECRC Generation Enable */ + PCI_ERR_CAP_ECRC_CHKE; /* ECRC Check Enable */ + p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask); + return 0; +} + +/* Permissions for Power Budgeting extended capability */ +static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR])) + return -ENOMEM; + + p_setd(perm, 0, ALL_VIRT, NO_WRITE); + + /* Writing the data selector is OK, the info is still read-only */ + p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE); + return 0; +} + +/* + * Initialize the shared permission tables + */ +void vfio_pci_uninit_perm_bits(void) +{ + free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]); + + free_perm_bits(&cap_perms[PCI_CAP_ID_PM]); + free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]); + 
free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]); + free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]); + free_perm_bits(&cap_perms[PCI_CAP_ID_AF]); + + free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]); + free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]); +} + +int __init vfio_pci_init_perm_bits(void) +{ + int ret; + + /* Basic config space */ + ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]); + + /* Capabilities */ + ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]); + ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]); + ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]); + cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write; + ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]); + ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]); + + /* Extended capabilities */ + ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]); + ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]); + ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write; + + if (ret) + vfio_pci_uninit_perm_bits(); + + return ret; +} + +static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos) +{ + u8 cap; + int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE : + PCI_STD_HEADER_SIZEOF; + cap = vdev->pci_config_map[pos]; + + if (cap == PCI_CAP_ID_BASIC) + return 0; + + /* XXX Can we have to abutting capabilities of the same type? */ + while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap) + pos--; + + return pos; +} + +static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + /* Update max available queue size from msi_qmax */ + if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) { + __le16 *flags; + int start; + + start = vfio_find_cap_start(vdev, pos); + + flags = (__le16 *)&vdev->vconfig[start]; + + *flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK); + *flags |= cpu_to_le16(vdev->msi_qmax << 1); + } + + return vfio_default_config_read(vdev, pos, count, perm, offset, val); +} + +static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + /* Fixup and write configured queue size and enable to hardware */ + if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) { + __le16 *pflags; + u16 flags; + int start, ret; + + start = vfio_find_cap_start(vdev, pos); + + pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS]; + + flags = le16_to_cpu(*pflags); + + /* MSI is enabled via ioctl */ + if (!is_msi(vdev)) + flags &= ~PCI_MSI_FLAGS_ENABLE; + + /* Check queue size */ + if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) { + flags &= ~PCI_MSI_FLAGS_QSIZE; + flags |= vdev->msi_qmax << 4; + } + + /* Write back to virt and to hardware */ + *pflags = cpu_to_le16(flags); + ret = pci_user_write_config_word(vdev->pdev, + start + PCI_MSI_FLAGS, + flags); + if (ret) + return ret; + } + + return count; +} + +/* + * MSI determination is per-device, so this routine gets used beyond + * initialization time. 
Don't add __init + */ +static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags) +{ + if (alloc_perm_bits(perm, len)) + return -ENOMEM; + + perm->readfn = vfio_msi_config_read; + perm->writefn = vfio_msi_config_write; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * The upper byte of the control register is reserved, + * just setup the lower byte. + */ + p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE); + p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE); + if (flags & PCI_MSI_FLAGS_64BIT) { + p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE); + p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE); + if (flags & PCI_MSI_FLAGS_MASKBIT) { + p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE); + p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE); + } + } else { + p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE); + if (flags & PCI_MSI_FLAGS_MASKBIT) { + p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE); + p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE); + } + } + return 0; +} + +/* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */ +static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos) +{ + struct pci_dev *pdev = vdev->pdev; + int len, ret; + u16 flags; + + ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags); + if (ret) + return pcibios_err_to_errno(ret); + + len = 10; /* Minimum size */ + if (flags & PCI_MSI_FLAGS_64BIT) + len += 4; + if (flags & PCI_MSI_FLAGS_MASKBIT) + len += 10; + + if (vdev->msi_perm) + return len; + + vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL); + if (!vdev->msi_perm) + return -ENOMEM; + + ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags); + if (ret) { + kfree(vdev->msi_perm); + return ret; + } + + return len; +} + +/* Determine extended capability length for VC (2 & 9) and MFVC */ +static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos) +{ + struct pci_dev *pdev = vdev->pdev; + u32 tmp; + int ret, evcc, phases, vc_arb; + int len = PCI_CAP_VC_BASE_SIZEOF; + + ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp); + if (ret) + return pcibios_err_to_errno(ret); + + evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */ + ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp); + if (ret) + return pcibios_err_to_errno(ret); + + if (tmp & PCI_VC_CAP2_128_PHASE) + phases = 128; + else if (tmp & PCI_VC_CAP2_64_PHASE) + phases = 64; + else if (tmp & PCI_VC_CAP2_32_PHASE) + phases = 32; + else + phases = 0; + + vc_arb = phases * 4; + + /* + * Port arbitration tables are root & switch only; + * function arbitration tables are function 0 only. 
+ * In either case, we'll never let user write them so + * we don't care how big they are + */ + len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF; + if (vc_arb) { + len = round_up(len, 16); + len += vc_arb / 8; + } + return len; +} + +static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos) +{ + struct pci_dev *pdev = vdev->pdev; + u32 dword; + u16 word; + u8 byte; + int ret; + + switch (cap) { + case PCI_CAP_ID_MSI: + return vfio_msi_cap_len(vdev, pos); + case PCI_CAP_ID_PCIX: + ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word); + if (ret) + return pcibios_err_to_errno(ret); + + if (PCI_X_CMD_VERSION(word)) { + if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) { + /* Test for extended capabilities */ + pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, + &dword); + vdev->extended_caps = (dword != 0); + } + return PCI_CAP_PCIX_SIZEOF_V2; + } else + return PCI_CAP_PCIX_SIZEOF_V0; + case PCI_CAP_ID_VNDR: + /* length follows next field */ + ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + return byte; + case PCI_CAP_ID_EXP: + if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) { + /* Test for extended capabilities */ + pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword); + vdev->extended_caps = (dword != 0); + } + + /* length based on version and type */ + if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) { + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END) + return 0xc; /* "All Devices" only, no link */ + return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1; + } else { + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END) + return 0x2c; /* No link */ + return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2; + } + case PCI_CAP_ID_HT: + ret = pci_read_config_byte(pdev, pos + 3, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + return (byte & HT_3BIT_CAP_MASK) ? + HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG; + case PCI_CAP_ID_SATA: + ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + byte &= PCI_SATA_REGS_MASK; + if (byte == PCI_SATA_REGS_INLINE) + return PCI_SATA_SIZEOF_LONG; + else + return PCI_SATA_SIZEOF_SHORT; + default: + pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n", + dev_name(&pdev->dev), __func__, cap, pos); + } + + return 0; +} + +static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos) +{ + struct pci_dev *pdev = vdev->pdev; + u8 byte; + u32 dword; + int ret; + + switch (ecap) { + case PCI_EXT_CAP_ID_VNDR: + ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword); + if (ret) + return pcibios_err_to_errno(ret); + + return dword >> PCI_VSEC_HDR_LEN_SHIFT; + case PCI_EXT_CAP_ID_VC: + case PCI_EXT_CAP_ID_VC9: + case PCI_EXT_CAP_ID_MFVC: + return vfio_vc_cap_len(vdev, epos); + case PCI_EXT_CAP_ID_ACS: + ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + if (byte & PCI_ACS_EC) { + int bits; + + ret = pci_read_config_byte(pdev, + epos + PCI_ACS_EGRESS_BITS, + &byte); + if (ret) + return pcibios_err_to_errno(ret); + + bits = byte ? 
round_up(byte, 32) : 256; + return 8 + (bits / 8); + } + return 8; + + case PCI_EXT_CAP_ID_REBAR: + ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + byte &= PCI_REBAR_CTRL_NBAR_MASK; + byte >>= PCI_REBAR_CTRL_NBAR_SHIFT; + + return 4 + (byte * 8); + case PCI_EXT_CAP_ID_DPA: + ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + byte &= PCI_DPA_CAP_SUBSTATE_MASK; + return PCI_DPA_BASE_SIZEOF + byte + 1; + case PCI_EXT_CAP_ID_TPH: + ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword); + if (ret) + return pcibios_err_to_errno(ret); + + if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) { + int sts; + + sts = dword & PCI_TPH_CAP_ST_MASK; + sts >>= PCI_TPH_CAP_ST_SHIFT; + return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2; + } + return PCI_TPH_BASE_SIZEOF; + default: + pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n", + dev_name(&pdev->dev), __func__, ecap, epos); + } + + return 0; +} + +static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev, + int offset, int size) +{ + struct pci_dev *pdev = vdev->pdev; + int ret = 0; + + /* + * We try to read physical config space in the largest chunks + * we can, assuming that all of the fields support dword access. + * pci_save_state() makes this same assumption and seems to do ok. + */ + while (size) { + int filled; + + if (size >= 4 && !(offset % 4)) { + __le32 *dwordp = (__le32 *)&vdev->vconfig[offset]; + u32 dword; + + ret = pci_read_config_dword(pdev, offset, &dword); + if (ret) + return ret; + *dwordp = cpu_to_le32(dword); + filled = 4; + } else if (size >= 2 && !(offset % 2)) { + __le16 *wordp = (__le16 *)&vdev->vconfig[offset]; + u16 word; + + ret = pci_read_config_word(pdev, offset, &word); + if (ret) + return ret; + *wordp = cpu_to_le16(word); + filled = 2; + } else { + u8 *byte = &vdev->vconfig[offset]; + ret = pci_read_config_byte(pdev, offset, byte); + if (ret) + return ret; + filled = 1; + } + + offset += filled; + size -= filled; + } + + return ret; +} + +static int vfio_cap_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u8 *map = vdev->pci_config_map; + u16 status; + u8 pos, *prev, cap; + int loops, ret, caps = 0; + + /* Any capabilities? */ + ret = pci_read_config_word(pdev, PCI_STATUS, &status); + if (ret) + return ret; + + if (!(status & PCI_STATUS_CAP_LIST)) + return 0; /* Done */ + + ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos); + if (ret) + return ret; + + /* Mark the previous position in case we want to skip a capability */ + prev = &vdev->vconfig[PCI_CAPABILITY_LIST]; + + /* We can bound our loop, capabilities are dword aligned */ + loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF; + while (pos && loops--) { + u8 next; + int i, len = 0; + + ret = pci_read_config_byte(pdev, pos, &cap); + if (ret) + return ret; + + ret = pci_read_config_byte(pdev, + pos + PCI_CAP_LIST_NEXT, &next); + if (ret) + return ret; + + /* + * ID 0 is a NULL capability, conflicting with our fake + * PCI_CAP_ID_BASIC. As it has no content, consider it + * hidden for now. + */ + if (cap && cap <= PCI_CAP_ID_MAX) { + len = pci_cap_length[cap]; + if (len == 0xFF) { /* Variable length */ + len = vfio_cap_len(vdev, cap, pos); + if (len < 0) + return len; + } + } + + if (!len) { + pr_info("%s: %s hiding cap 0x%x\n", + __func__, dev_name(&pdev->dev), cap); + *prev = next; + pos = next; + continue; + } + + /* Sanity check, do we overlap other capabilities? 
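 *
 * [Editor's note, not part of the patch: pci_config_map holds one byte
 *  per byte of config space, recording which capability ID owns it
 *  (PCI_CAP_ID_BASIC for the standard header, PCI_CAP_ID_INVALID for
 *  the gaps). vfio_config_do_rw() later dispatches on it roughly as:
 *
 *    u8 cap = vdev->pci_config_map[offset];
 *    struct perm_bits *perm = (offset < PCI_CFG_SPACE_SIZE) ?
 *                             &cap_perms[cap] : &ecap_perms[cap];
 *
 *  which is why overlapping capabilities are worth warning about here.]
 *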
*/ + for (i = 0; i < len; i++) { + if (likely(map[pos + i] == PCI_CAP_ID_INVALID)) + continue; + + pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n", + __func__, dev_name(&pdev->dev), + pos + i, map[pos + i], cap); + } + + BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); + + memset(map + pos, cap, len); + ret = vfio_fill_vconfig_bytes(vdev, pos, len); + if (ret) + return ret; + + prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT]; + pos = next; + caps++; + } + + /* If we didn't fill any capabilities, clear the status flag */ + if (!caps) { + __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS]; + *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST); + } + + return 0; +} + +static int vfio_ecap_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u8 *map = vdev->pci_config_map; + u16 epos; + __le32 *prev = NULL; + int loops, ret, ecaps = 0; + + if (!vdev->extended_caps) + return 0; + + epos = PCI_CFG_SPACE_SIZE; + + loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF; + + while (loops-- && epos >= PCI_CFG_SPACE_SIZE) { + u32 header; + u16 ecap; + int i, len = 0; + bool hidden = false; + + ret = pci_read_config_dword(pdev, epos, &header); + if (ret) + return ret; + + ecap = PCI_EXT_CAP_ID(header); + + if (ecap <= PCI_EXT_CAP_ID_MAX) { + len = pci_ext_cap_length[ecap]; + if (len == 0xFF) { + len = vfio_ext_cap_len(vdev, ecap, epos); + if (len < 0) + return len; + } + } + + if (!len) { + pr_info("%s: %s hiding ecap 0x%x@0x%x\n", + __func__, dev_name(&pdev->dev), ecap, epos); + + /* If not the first in the chain, we can skip over it */ + if (prev) { + u32 val = epos = PCI_EXT_CAP_NEXT(header); + *prev &= cpu_to_le32(~(0xffcU << 20)); + *prev |= cpu_to_le32(val << 20); + continue; + } + + /* + * Otherwise, fill in a placeholder, the direct + * readfn will virtualize this automatically + */ + len = PCI_CAP_SIZEOF; + hidden = true; + } + + for (i = 0; i < len; i++) { + if (likely(map[epos + i] == PCI_CAP_ID_INVALID)) + continue; + + pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n", + __func__, dev_name(&pdev->dev), + epos + i, map[epos + i], ecap); + } + + /* + * Even though ecap is 2 bytes, we're currently a long way + * from exceeding 1 byte capabilities. If we ever make it + * up to 0xFE we'll need to up this to a two-byte, byte map. + */ + BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); + + memset(map + epos, ecap, len); + ret = vfio_fill_vconfig_bytes(vdev, epos, len); + if (ret) + return ret; + + /* + * If we're just using this capability to anchor the list, + * hide the real ID. Only count real ecaps. XXX PCI spec + * indicates to use cap id = 0, version = 0, next = 0 if + * ecaps are absent, hope users check all the way to next. + */ + if (hidden) + *(__le32 *)&vdev->vconfig[epos] &= + cpu_to_le32((0xffcU << 20)); + else + ecaps++; + + prev = (__le32 *)&vdev->vconfig[epos]; + epos = PCI_EXT_CAP_NEXT(header); + } + + if (!ecaps) + *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0; + + return 0; +} + +/* + * Nag about hardware bugs, hopefully to have vendors fix them, but at least + * to collect a list of dependencies for the VF INTx pin quirk below. + */ +static const struct pci_device_id known_bogus_vf_intx_pin[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) }, + {} +}; + +/* + * For each device we allocate a pci_config_map that indicates the + * capability occupying each dword and thus the struct perm_bits we + * use for read and write. 
We also allocate a virtualized config + * space which tracks reads and writes to bits that we emulate for + * the user. Initial values filled from device. + * + * Using shared struct perm_bits between all vfio-pci devices saves + * us from allocating cfg_size buffers for virt and write for every + * device. We could remove vconfig and allocate individual buffers + * for each area requiring emulated bits, but the array of pointers + * would be comparable in size (at least for standard config space). + */ +int vfio_config_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u8 *map, *vconfig; + int ret; + + /* + * Config space, caps and ecaps are all dword aligned, so we could + * use one byte per dword to record the type. However, there are + * no requiremenst on the length of a capability, so the gap between + * capabilities needs byte granularity. + */ + map = kmalloc(pdev->cfg_size, GFP_KERNEL); + if (!map) + return -ENOMEM; + + vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL); + if (!vconfig) { + kfree(map); + return -ENOMEM; + } + + vdev->pci_config_map = map; + vdev->vconfig = vconfig; + + memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF); + memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID, + pdev->cfg_size - PCI_STD_HEADER_SIZEOF); + + ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF); + if (ret) + goto out; + + vdev->bardirty = true; + + /* + * XXX can we just pci_load_saved_state/pci_restore_state? + * may need to rebuild vconfig after that + */ + + /* For restore after reset */ + vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]); + vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]); + vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]); + vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]); + vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]); + vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]); + vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]); + + if (pdev->is_virtfn) { + *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor); + *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); + + /* + * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register + * does not apply to VFs and VFs must implement this register + * as read-only with value zero. Userspace is not readily able + * to identify whether a device is a VF and thus that the pin + * definition on the device is bogus should it violate this + * requirement. We already virtualize the pin register for + * other purposes, so we simply need to replace the bogus value + * and consider VFs when we determine INTx IRQ count. + */ + if (vconfig[PCI_INTERRUPT_PIN] && + !pci_match_id(known_bogus_vf_intx_pin, pdev)) + pci_warn(pdev, + "Hardware bug: VF reports bogus INTx pin %d\n", + vconfig[PCI_INTERRUPT_PIN]); + + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. 
+ */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); + } + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) + vconfig[PCI_INTERRUPT_PIN] = 0; + + ret = vfio_cap_init(vdev); + if (ret) + goto out; + + ret = vfio_ecap_init(vdev); + if (ret) + goto out; + + return 0; + +out: + kfree(map); + vdev->pci_config_map = NULL; + kfree(vconfig); + vdev->vconfig = NULL; + return pcibios_err_to_errno(ret); +} + +void vfio_config_free(struct vfio_pci_device *vdev) +{ + kfree(vdev->vconfig); + vdev->vconfig = NULL; + kfree(vdev->pci_config_map); + vdev->pci_config_map = NULL; + if (vdev->msi_perm) { + free_perm_bits(vdev->msi_perm); + kfree(vdev->msi_perm); + vdev->msi_perm = NULL; + } +} + +/* + * Find the remaining number of bytes in a dword that match the given + * position. Stop at either the end of the capability or the dword boundary. + */ +static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev, + loff_t pos) +{ + u8 cap = vdev->pci_config_map[pos]; + size_t i; + + for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++) + /* nop */; + + return i; +} + +static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + struct pci_dev *pdev = vdev->pdev; + struct perm_bits *perm; + __le32 val = 0; + int cap_start = 0, offset; + u8 cap_id; + ssize_t ret; + + if (*ppos < 0 || *ppos >= pdev->cfg_size || + *ppos + count > pdev->cfg_size) + return -EFAULT; + + /* + * Chop accesses into aligned chunks containing no more than a + * single capability. Caller increments to the next chunk. + */ + count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos)); + if (count >= 4 && !(*ppos % 4)) + count = 4; + else if (count >= 2 && !(*ppos % 2)) + count = 2; + else + count = 1; + + ret = count; + + cap_id = vdev->pci_config_map[*ppos]; + + if (cap_id == PCI_CAP_ID_INVALID) { + perm = &unassigned_perms; + cap_start = *ppos; + } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) { + perm = &virt_perms; + cap_start = *ppos; + } else { + if (*ppos >= PCI_CFG_SPACE_SIZE) { + WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); + + perm = &ecap_perms[cap_id]; + cap_start = vfio_find_cap_start(vdev, *ppos); + } else { + WARN_ON(cap_id > PCI_CAP_ID_MAX); + + perm = &cap_perms[cap_id]; + + if (cap_id == PCI_CAP_ID_MSI) + perm = vdev->msi_perm; + + if (cap_id > PCI_CAP_ID_BASIC) + cap_start = vfio_find_cap_start(vdev, *ppos); + } + } + + WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC); + WARN_ON(cap_start > *ppos); + + offset = *ppos - cap_start; + + if (iswrite) { + if (!perm->writefn) + return ret; + + if (copy_from_user(&val, buf, count)) + return -EFAULT; + + ret = perm->writefn(vdev, *ppos, count, perm, offset, val); + } else { + if (perm->readfn) { + ret = perm->readfn(vdev, *ppos, count, + perm, offset, &val); + if (ret < 0) + return ret; + } + + if (copy_to_user(buf, &val, count)) + return -EFAULT; + } + + return ret; +} + +ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + size_t done = 0; + int ret = 0; + loff_t pos = *ppos; + + pos &= VFIO_PCI_OFFSET_MASK; + + while (count) { + ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite); + if (ret < 0) + return ret; + + count -= ret; + done += ret; + buf += ret; + pos += ret; + } + + *ppos += done; + + return done; +} diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c new file mode 100644 index 000000000..6394b168e --- /dev/null +++ 
b/drivers/vfio/pci/vfio_pci_igd.c @@ -0,0 +1,280 @@ +/* + * VFIO PCI Intel Graphics support + * + * Copyright (C) 2016 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Register a device specific region through which to provide read-only + * access to the Intel IGD opregion. The register defining the opregion + * address is also virtualized to prevent user modification. + */ + +#include <linux/io.h> +#include <linux/pci.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> + +#include "vfio_pci_private.h" + +#define OPREGION_SIGNATURE "IntelGraphicsMem" +#define OPREGION_SIZE (8 * 1024) +#define OPREGION_PCI_ADDR 0xfc + +static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; + void *base = vdev->region[i].data; + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + + if (pos >= vdev->region[i].size || iswrite) + return -EINVAL; + + count = min(count, (size_t)(vdev->region[i].size - pos)); + + if (copy_to_user(buf, base + pos, count)) + return -EFAULT; + + *ppos += count; + + return count; +} + +static void vfio_pci_igd_release(struct vfio_pci_device *vdev, + struct vfio_pci_region *region) +{ + memunmap(region->data); +} + +static const struct vfio_pci_regops vfio_pci_igd_regops = { + .rw = vfio_pci_igd_rw, + .release = vfio_pci_igd_release, +}; + +static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev) +{ + __le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR); + u32 addr, size; + void *base; + int ret; + + ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr); + if (ret) + return ret; + + if (!addr || !(~addr)) + return -ENODEV; + + base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB); + if (!base) + return -ENOMEM; + + if (memcmp(base, OPREGION_SIGNATURE, 16)) { + memunmap(base); + return -EINVAL; + } + + size = le32_to_cpu(*(__le32 *)(base + 16)); + if (!size) { + memunmap(base); + return -EINVAL; + } + + size *= 1024; /* In KB */ + + if (size != OPREGION_SIZE) { + memunmap(base); + base = memremap(addr, size, MEMREMAP_WB); + if (!base) + return -ENOMEM; + } + + ret = vfio_pci_register_dev_region(vdev, + PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, + VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, + &vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base); + if (ret) { + memunmap(base); + return ret; + } + + /* Fill vconfig with the hw value and virtualize register */ + *dwordp = cpu_to_le32(addr); + memset(vdev->pci_config_map + OPREGION_PCI_ADDR, + PCI_CAP_ID_INVALID_VIRT, 4); + + return ret; +} + +static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev, + char __user *buf, size_t count, loff_t *ppos, + bool iswrite) +{ + unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; + struct pci_dev *pdev = vdev->region[i].data; + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + size_t size; + int ret; + + if (pos >= vdev->region[i].size || iswrite) + return -EINVAL; + + size = count = min(count, (size_t)(vdev->region[i].size - pos)); + + if ((pos & 1) && size) { + u8 val; + + ret = pci_user_read_config_byte(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + if (copy_to_user(buf + count - size, &val, 1)) + return -EFAULT; + + pos++; + 
size--; + } + + if ((pos & 3) && size > 2) { + u16 val; + + ret = pci_user_read_config_word(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + val = cpu_to_le16(val); + if (copy_to_user(buf + count - size, &val, 2)) + return -EFAULT; + + pos += 2; + size -= 2; + } + + while (size > 3) { + u32 val; + + ret = pci_user_read_config_dword(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + val = cpu_to_le32(val); + if (copy_to_user(buf + count - size, &val, 4)) + return -EFAULT; + + pos += 4; + size -= 4; + } + + while (size >= 2) { + u16 val; + + ret = pci_user_read_config_word(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + val = cpu_to_le16(val); + if (copy_to_user(buf + count - size, &val, 2)) + return -EFAULT; + + pos += 2; + size -= 2; + } + + while (size) { + u8 val; + + ret = pci_user_read_config_byte(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + if (copy_to_user(buf + count - size, &val, 1)) + return -EFAULT; + + pos++; + size--; + } + + *ppos += count; + + return count; +} + +static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev, + struct vfio_pci_region *region) +{ + struct pci_dev *pdev = region->data; + + pci_dev_put(pdev); +} + +static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = { + .rw = vfio_pci_igd_cfg_rw, + .release = vfio_pci_igd_cfg_release, +}; + +static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *host_bridge, *lpc_bridge; + int ret; + + host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + if (!host_bridge) + return -ENODEV; + + if (host_bridge->vendor != PCI_VENDOR_ID_INTEL || + host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) { + pci_dev_put(host_bridge); + return -EINVAL; + } + + ret = vfio_pci_register_dev_region(vdev, + PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, + VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, + &vfio_pci_igd_cfg_regops, host_bridge->cfg_size, + VFIO_REGION_INFO_FLAG_READ, host_bridge); + if (ret) { + pci_dev_put(host_bridge); + return ret; + } + + lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0)); + if (!lpc_bridge) + return -ENODEV; + + if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL || + lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) { + pci_dev_put(lpc_bridge); + return -EINVAL; + } + + ret = vfio_pci_register_dev_region(vdev, + PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, + VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, + &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size, + VFIO_REGION_INFO_FLAG_READ, lpc_bridge); + if (ret) { + pci_dev_put(lpc_bridge); + return ret; + } + + return 0; +} + +int vfio_pci_igd_init(struct vfio_pci_device *vdev) +{ + int ret; + + ret = vfio_pci_igd_opregion_init(vdev); + if (ret) + return ret; + + ret = vfio_pci_igd_cfg_init(vdev); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c new file mode 100644 index 000000000..c989f777b --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -0,0 +1,696 @@ +/* + * VFIO PCI interrupt handling + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. 
+ * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/eventfd.h> +#include <linux/msi.h> +#include <linux/pci.h> +#include <linux/file.h> +#include <linux/vfio.h> +#include <linux/wait.h> +#include <linux/slab.h> + +#include "vfio_pci_private.h" + +/* + * INTx + */ +static void vfio_send_intx_eventfd(void *opaque, void *unused) +{ + struct vfio_pci_device *vdev = opaque; + + if (likely(is_intx(vdev) && !vdev->virq_disabled)) + eventfd_signal(vdev->ctx[0].trigger, 1); +} + +void vfio_pci_intx_mask(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + unsigned long flags; + + spin_lock_irqsave(&vdev->irqlock, flags); + + /* + * Masking can come from interrupt, ioctl, or config space + * via INTx disable. The latter means this can get called + * even when not using intx delivery. In this case, just + * try to have the physical bit follow the virtual bit. + */ + if (unlikely(!is_intx(vdev))) { + if (vdev->pci_2_3) + pci_intx(pdev, 0); + } else if (!vdev->ctx[0].masked) { + /* + * Can't use check_and_mask here because we always want to + * mask, not just when something is pending. + */ + if (vdev->pci_2_3) + pci_intx(pdev, 0); + else + disable_irq_nosync(pdev->irq); + + vdev->ctx[0].masked = true; + } + + spin_unlock_irqrestore(&vdev->irqlock, flags); +} + +/* + * If this is triggered by an eventfd, we can't call eventfd_signal + * or else we'll deadlock on the eventfd wait queue. Return >0 when + * a signal is necessary, which can then be handled via a work queue + * or directly depending on the caller. + */ +static int vfio_pci_intx_unmask_handler(void *opaque, void *unused) +{ + struct vfio_pci_device *vdev = opaque; + struct pci_dev *pdev = vdev->pdev; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&vdev->irqlock, flags); + + /* + * Unmasking comes from ioctl or config, so again, have the + * physical bit follow the virtual even when not using INTx. + */ + if (unlikely(!is_intx(vdev))) { + if (vdev->pci_2_3) + pci_intx(pdev, 1); + } else if (vdev->ctx[0].masked && !vdev->virq_disabled) { + /* + * A pending interrupt here would immediately trigger, + * but we can avoid that overhead by just re-sending + * the interrupt to the user. 
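 *
 * [Editor's note, not part of the patch: handlers like this one are
 *  driven by eventfds that userspace registers with
 *  VFIO_DEVICE_SET_IRQS. A rough sketch, assuming device_fd is the open
 *  vfio device and ignoring error handling:
 *
 *    struct vfio_irq_set *set;
 *    int efd = eventfd(0, 0);
 *
 *    set = calloc(1, sizeof(*set) + sizeof(__s32));
 *    set->argsz = sizeof(*set) + sizeof(__s32);
 *    set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
 *    set->index = VFIO_PCI_INTX_IRQ_INDEX;
 *    set->start = 0;
 *    set->count = 1;
 *    memcpy(set->data, &efd, sizeof(__s32));
 *    ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 *  The same layout with VFIO_IRQ_SET_ACTION_TRIGGER attaches the
 *  eventfd that vfio_send_intx_eventfd() above signals on each
 *  interrupt.]
 *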
+ */ + if (vdev->pci_2_3) { + if (!pci_check_and_unmask_intx(pdev)) + ret = 1; + } else + enable_irq(pdev->irq); + + vdev->ctx[0].masked = (ret > 0); + } + + spin_unlock_irqrestore(&vdev->irqlock, flags); + + return ret; +} + +void vfio_pci_intx_unmask(struct vfio_pci_device *vdev) +{ + if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0) + vfio_send_intx_eventfd(vdev, NULL); +} + +static irqreturn_t vfio_intx_handler(int irq, void *dev_id) +{ + struct vfio_pci_device *vdev = dev_id; + unsigned long flags; + int ret = IRQ_NONE; + + spin_lock_irqsave(&vdev->irqlock, flags); + + if (!vdev->pci_2_3) { + disable_irq_nosync(vdev->pdev->irq); + vdev->ctx[0].masked = true; + ret = IRQ_HANDLED; + } else if (!vdev->ctx[0].masked && /* may be shared */ + pci_check_and_mask_intx(vdev->pdev)) { + vdev->ctx[0].masked = true; + ret = IRQ_HANDLED; + } + + spin_unlock_irqrestore(&vdev->irqlock, flags); + + if (ret == IRQ_HANDLED) + vfio_send_intx_eventfd(vdev, NULL); + + return ret; +} + +static int vfio_intx_enable(struct vfio_pci_device *vdev) +{ + if (!is_irq_none(vdev)) + return -EINVAL; + + if (!vdev->pdev->irq) + return -ENODEV; + + vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + if (!vdev->ctx) + return -ENOMEM; + + vdev->num_ctx = 1; + + /* + * If the virtual interrupt is masked, restore it. Devices + * supporting DisINTx can be masked at the hardware level + * here, non-PCI-2.3 devices will have to wait until the + * interrupt is enabled. + */ + vdev->ctx[0].masked = vdev->virq_disabled; + if (vdev->pci_2_3) + pci_intx(vdev->pdev, !vdev->ctx[0].masked); + + vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; + + return 0; +} + +static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) +{ + struct pci_dev *pdev = vdev->pdev; + unsigned long irqflags = IRQF_SHARED; + struct eventfd_ctx *trigger; + unsigned long flags; + int ret; + + if (vdev->ctx[0].trigger) { + free_irq(pdev->irq, vdev); + kfree(vdev->ctx[0].name); + eventfd_ctx_put(vdev->ctx[0].trigger); + vdev->ctx[0].trigger = NULL; + } + + if (fd < 0) /* Disable only */ + return 0; + + vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", + pci_name(pdev)); + if (!vdev->ctx[0].name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(vdev->ctx[0].name); + return PTR_ERR(trigger); + } + + vdev->ctx[0].trigger = trigger; + + if (!vdev->pci_2_3) + irqflags = 0; + + ret = request_irq(pdev->irq, vfio_intx_handler, + irqflags, vdev->ctx[0].name, vdev); + if (ret) { + vdev->ctx[0].trigger = NULL; + kfree(vdev->ctx[0].name); + eventfd_ctx_put(trigger); + return ret; + } + + /* + * INTx disable will stick across the new irq setup, + * disable_irq won't. + */ + spin_lock_irqsave(&vdev->irqlock, flags); + if (!vdev->pci_2_3 && vdev->ctx[0].masked) + disable_irq_nosync(pdev->irq); + spin_unlock_irqrestore(&vdev->irqlock, flags); + + return 0; +} + +static void vfio_intx_disable(struct vfio_pci_device *vdev) +{ + vfio_virqfd_disable(&vdev->ctx[0].unmask); + vfio_virqfd_disable(&vdev->ctx[0].mask); + vfio_intx_set_signal(vdev, -1); + vdev->irq_type = VFIO_PCI_NUM_IRQS; + vdev->num_ctx = 0; + kfree(vdev->ctx); +} + +/* + * MSI/MSI-X + */ +static irqreturn_t vfio_msihandler(int irq, void *arg) +{ + struct eventfd_ctx *trigger = arg; + + eventfd_signal(trigger, 1); + return IRQ_HANDLED; +} + +static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) +{ + struct pci_dev *pdev = vdev->pdev; + unsigned int flag = msix ? 
PCI_IRQ_MSIX : PCI_IRQ_MSI; + int ret; + u16 cmd; + + if (!is_irq_none(vdev)) + return -EINVAL; + + vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + if (!vdev->ctx) + return -ENOMEM; + + /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); + if (ret < nvec) { + if (ret > 0) + pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx); + return ret; + } + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + vdev->num_ctx = nvec; + vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : + VFIO_PCI_MSI_IRQ_INDEX; + + if (!msix) { + /* + * Compute the virtual hardware field for max msi vectors - + * it is the log base 2 of the number of vectors. + */ + vdev->msi_qmax = fls(nvec * 2 - 1) - 1; + } + + return 0; +} + +static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + int vector, int fd, bool msix) +{ + struct pci_dev *pdev = vdev->pdev; + struct eventfd_ctx *trigger; + int irq, ret; + u16 cmd; + + if (vector < 0 || vector >= vdev->num_ctx) + return -EINVAL; + + irq = pci_irq_vector(pdev, vector); + + if (vdev->ctx[vector].trigger) { + irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); + free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + kfree(vdev->ctx[vector].name); + eventfd_ctx_put(vdev->ctx[vector].trigger); + vdev->ctx[vector].trigger = NULL; + } + + if (fd < 0) + return 0; + + vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)", + msix ? "x" : "", vector, + pci_name(pdev)); + if (!vdev->ctx[vector].name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(vdev->ctx[vector].name); + return PTR_ERR(trigger); + } + + /* + * The MSIx vector table resides in device memory which may be cleared + * via backdoor resets. We don't allow direct access to the vector + * table so even if a userspace driver attempts to save/restore around + * such a reset it would be unsuccessful. To avoid this, restore the + * cached value of the message prior to enabling. + */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + if (msix) { + struct msi_msg msg; + + get_cached_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); + } + + ret = request_irq(irq, vfio_msihandler, 0, + vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + if (ret) { + kfree(vdev->ctx[vector].name); + eventfd_ctx_put(trigger); + return ret; + } + + vdev->ctx[vector].producer.token = trigger; + vdev->ctx[vector].producer.irq = irq; + ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); + if (unlikely(ret)) { + dev_info(&pdev->dev, + "irq bypass producer (token %p) registration fails: %d\n", + vdev->ctx[vector].producer.token, ret); + + vdev->ctx[vector].producer.token = NULL; + } + vdev->ctx[vector].trigger = trigger; + + return 0; +} + +static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start, + unsigned count, int32_t *fds, bool msix) +{ + int i, j, ret = 0; + + if (start >= vdev->num_ctx || start + count > vdev->num_ctx) + return -EINVAL; + + for (i = 0, j = start; i < count && !ret; i++, j++) { + int fd = fds ? 
fds[i] : -1; + ret = vfio_msi_set_vector_signal(vdev, j, fd, msix); + } + + if (ret) { + for (--j; j >= (int)start; j--) + vfio_msi_set_vector_signal(vdev, j, -1, msix); + } + + return ret; +} + +static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) +{ + struct pci_dev *pdev = vdev->pdev; + int i; + u16 cmd; + + for (i = 0; i < vdev->num_ctx; i++) { + vfio_virqfd_disable(&vdev->ctx[i].unmask); + vfio_virqfd_disable(&vdev->ctx[i].mask); + } + + vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + + cmd = vfio_pci_memory_lock_and_enable(vdev); + pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + /* + * Both disable paths above use pci_intx_for_msi() to clear DisINTx + * via their shutdown paths. Restore for NoINTx devices. + */ + if (vdev->nointx) + pci_intx(pdev, 0); + + vdev->irq_type = VFIO_PCI_NUM_IRQS; + vdev->num_ctx = 0; + kfree(vdev->ctx); +} + +/* + * IOCTL support + */ +static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (!is_intx(vdev) || start != 0 || count != 1) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_pci_intx_unmask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t unmask = *(uint8_t *)data; + if (unmask) + vfio_pci_intx_unmask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + if (fd >= 0) + return vfio_virqfd_enable((void *) vdev, + vfio_pci_intx_unmask_handler, + vfio_send_intx_eventfd, NULL, + &vdev->ctx[0].unmask, fd); + + vfio_virqfd_disable(&vdev->ctx[0].unmask); + } + + return 0; +} + +static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (!is_intx(vdev) || start != 0 || count != 1) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_pci_intx_mask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t mask = *(uint8_t *)data; + if (mask) + vfio_pci_intx_mask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + return -ENOTTY; /* XXX implement me */ + } + + return 0; +} + +static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) { + vfio_intx_disable(vdev); + return 0; + } + + if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + int ret; + + if (is_intx(vdev)) + return vfio_intx_set_signal(vdev, fd); + + ret = vfio_intx_enable(vdev); + if (ret) + return ret; + + ret = vfio_intx_set_signal(vdev, fd); + if (ret) + vfio_intx_disable(vdev); + + return ret; + } + + if (!is_intx(vdev)) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_send_intx_eventfd(vdev, NULL); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t trigger = *(uint8_t *)data; + if (trigger) + vfio_send_intx_eventfd(vdev, NULL); + } + return 0; +} + +static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + int i; + bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? 
true : false; + + if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) { + vfio_msi_disable(vdev, msix); + return 0; + } + + if (!(irq_is(vdev, index) || is_irq_none(vdev))) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t *fds = data; + int ret; + + if (vdev->irq_type == index) + return vfio_msi_set_block(vdev, start, count, + fds, msix); + + ret = vfio_msi_enable(vdev, start + count, msix); + if (ret) + return ret; + + ret = vfio_msi_set_block(vdev, start, count, fds, msix); + if (ret) + vfio_msi_disable(vdev, msix); + + return ret; + } + + if (!irq_is(vdev, index) || start + count > vdev->num_ctx) + return -EINVAL; + + for (i = start; i < start + count; i++) { + if (!vdev->ctx[i].trigger) + continue; + if (flags & VFIO_IRQ_SET_DATA_NONE) { + eventfd_signal(vdev->ctx[i].trigger, 1); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t *bools = data; + if (bools[i - start]) + eventfd_signal(vdev->ctx[i].trigger, 1); + } + } + return 0; +} + +static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, + unsigned int count, uint32_t flags, + void *data) +{ + /* DATA_NONE/DATA_BOOL enables loopback testing */ + if (flags & VFIO_IRQ_SET_DATA_NONE) { + if (*ctx) { + if (count) { + eventfd_signal(*ctx, 1); + } else { + eventfd_ctx_put(*ctx); + *ctx = NULL; + } + return 0; + } + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t trigger; + + if (!count) + return -EINVAL; + + trigger = *(uint8_t *)data; + if (trigger && *ctx) + eventfd_signal(*ctx, 1); + + return 0; + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd; + + if (!count) + return -EINVAL; + + fd = *(int32_t *)data; + if (fd == -1) { + if (*ctx) + eventfd_ctx_put(*ctx); + *ctx = NULL; + } else if (fd >= 0) { + struct eventfd_ctx *efdctx; + + efdctx = eventfd_ctx_fdget(fd); + if (IS_ERR(efdctx)) + return PTR_ERR(efdctx); + + if (*ctx) + eventfd_ctx_put(*ctx); + + *ctx = efdctx; + } + return 0; + } + + return -EINVAL; +} + +static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) + return -EINVAL; + + return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, + count, flags, data); +} + +static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1) + return -EINVAL; + + return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, + count, flags, data); +} + +int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, + unsigned index, unsigned start, unsigned count, + void *data) +{ + int (*func)(struct vfio_pci_device *vdev, unsigned index, + unsigned start, unsigned count, uint32_t flags, + void *data) = NULL; + + switch (index) { + case VFIO_PCI_INTX_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + func = vfio_pci_set_intx_mask; + break; + case VFIO_IRQ_SET_ACTION_UNMASK: + func = vfio_pci_set_intx_unmask; + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = vfio_pci_set_intx_trigger; + break; + } + break; + case VFIO_PCI_MSI_IRQ_INDEX: + case VFIO_PCI_MSIX_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + case VFIO_IRQ_SET_ACTION_UNMASK: + /* XXX Need masking support exported */ + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = 
vfio_pci_set_msi_trigger; + break; + } + break; + case VFIO_PCI_ERR_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_TRIGGER: + if (pci_is_pcie(vdev->pdev)) + func = vfio_pci_set_err_trigger; + break; + } + break; + case VFIO_PCI_REQ_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = vfio_pci_set_req_trigger; + break; + } + break; + } + + if (!func) + return -ENOTTY; + + return func(vdev, index, start, count, flags, data); +} diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h new file mode 100644 index 000000000..17d2bae5b --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/irqbypass.h> +#include <linux/types.h> + +#ifndef VFIO_PCI_PRIVATE_H +#define VFIO_PCI_PRIVATE_H + +#define VFIO_PCI_OFFSET_SHIFT 40 + +#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) + +/* Special capability IDs predefined access */ +#define PCI_CAP_ID_INVALID 0xFF /* default raw access */ +#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */ + +/* Cap maximum number of ioeventfds per device (arbitrary) */ +#define VFIO_PCI_IOEVENTFD_MAX 1000 + +struct vfio_pci_ioeventfd { + struct list_head next; + struct virqfd *virqfd; + void __iomem *addr; + uint64_t data; + loff_t pos; + int bar; + int count; +}; + +struct vfio_pci_irq_ctx { + struct eventfd_ctx *trigger; + struct virqfd *unmask; + struct virqfd *mask; + char *name; + bool masked; + struct irq_bypass_producer producer; +}; + +struct vfio_pci_device; +struct vfio_pci_region; + +struct vfio_pci_regops { + size_t (*rw)(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + void (*release)(struct vfio_pci_device *vdev, + struct vfio_pci_region *region); +}; + +struct vfio_pci_region { + u32 type; + u32 subtype; + const struct vfio_pci_regops *ops; + void *data; + size_t size; + u32 flags; +}; + +struct vfio_pci_dummy_resource { + struct resource resource; + int index; + struct list_head res_next; +}; + +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +struct vfio_pci_device { + struct pci_dev *pdev; + void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; + bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1]; + u8 *pci_config_map; + u8 *vconfig; + struct perm_bits *msi_perm; + spinlock_t irqlock; + struct mutex igate; + struct vfio_pci_irq_ctx *ctx; + int num_ctx; + int irq_type; + int num_regions; + struct vfio_pci_region *region; + u8 msi_qmax; + u8 msix_bar; + u16 msix_size; + u32 msix_offset; + u32 rbar[7]; + bool pci_2_3; + bool virq_disabled; + bool reset_works; + bool extended_caps; + bool bardirty; + bool has_vga; + bool needs_reset; + bool nointx; + struct pci_saved_state *pci_saved_state; + int refcnt; + int ioeventfds_nr; + struct eventfd_ctx 
*err_trigger; + struct eventfd_ctx *req_trigger; + struct list_head dummy_resources_list; + struct mutex ioeventfds_lock; + struct list_head ioeventfds_list; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; +}; + +#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) +#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX) +#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX) +#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev))) +#define irq_is(vdev, type) (vdev->irq_type == type) + +extern void vfio_pci_intx_mask(struct vfio_pci_device *vdev); +extern void vfio_pci_intx_unmask(struct vfio_pci_device *vdev); + +extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, + uint32_t flags, unsigned index, + unsigned start, unsigned count, void *data); + +extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, + uint64_t data, int count, int fd); + +extern int vfio_pci_init_perm_bits(void); +extern void vfio_pci_uninit_perm_bits(void); + +extern int vfio_config_init(struct vfio_pci_device *vdev); +extern void vfio_config_free(struct vfio_pci_device *vdev); + +extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + +#ifdef CONFIG_VFIO_PCI_IGD +extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); +#else +static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev) +{ + return -ENODEV; +} +#endif +#endif /* VFIO_PCI_PRIVATE_H */ diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c new file mode 100644 index 000000000..3d0ec2bbe --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -0,0 +1,404 @@ +/* + * VFIO PCI I/O Port & MMIO access + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. 
+ * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/vfio.h> +#include <linux/vgaarb.h> + +#include "vfio_pci_private.h" + +#ifdef __LITTLE_ENDIAN +#define vfio_ioread64 ioread64 +#define vfio_iowrite64 iowrite64 +#define vfio_ioread32 ioread32 +#define vfio_iowrite32 iowrite32 +#define vfio_ioread16 ioread16 +#define vfio_iowrite16 iowrite16 +#else +#define vfio_ioread64 ioread64be +#define vfio_iowrite64 iowrite64be +#define vfio_ioread32 ioread32be +#define vfio_iowrite32 iowrite32be +#define vfio_ioread16 ioread16be +#define vfio_iowrite16 iowrite16be +#endif +#define vfio_ioread8 ioread8 +#define vfio_iowrite8 iowrite8 + +/* + * Read or write from an __iomem region (MMIO or I/O port) with an excluded + * range which is inaccessible. The excluded range drops writes and fills + * reads with -1. This is intended for handling MSI-X vector tables and + * leftover space for ROM BARs. + */ +static ssize_t do_io_rw(void __iomem *io, char __user *buf, + loff_t off, size_t count, size_t x_start, + size_t x_end, bool iswrite) +{ + ssize_t done = 0; + + while (count) { + size_t fillable, filled; + + if (off < x_start) + fillable = min(count, (size_t)(x_start - off)); + else if (off >= x_end) + fillable = count; + else + fillable = 0; + + if (fillable >= 4 && !(off % 4)) { + u32 val; + + if (iswrite) { + if (copy_from_user(&val, buf, 4)) + return -EFAULT; + + vfio_iowrite32(val, io + off); + } else { + val = vfio_ioread32(io + off); + + if (copy_to_user(buf, &val, 4)) + return -EFAULT; + } + + filled = 4; + } else if (fillable >= 2 && !(off % 2)) { + u16 val; + + if (iswrite) { + if (copy_from_user(&val, buf, 2)) + return -EFAULT; + + vfio_iowrite16(val, io + off); + } else { + val = vfio_ioread16(io + off); + + if (copy_to_user(buf, &val, 2)) + return -EFAULT; + } + + filled = 2; + } else if (fillable) { + u8 val; + + if (iswrite) { + if (copy_from_user(&val, buf, 1)) + return -EFAULT; + + vfio_iowrite8(val, io + off); + } else { + val = vfio_ioread8(io + off); + + if (copy_to_user(buf, &val, 1)) + return -EFAULT; + } + + filled = 1; + } else { + /* Fill reads with -1, drop writes */ + filled = min(count, (size_t)(x_end - off)); + if (!iswrite) { + u8 val = 0xFF; + size_t i; + + for (i = 0; i < filled; i++) + if (copy_to_user(buf + i, &val, 1)) + return -EFAULT; + } + } + + count -= filled; + done += filled; + off += filled; + buf += filled; + } + + return done; +} + +static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar) +{ + struct pci_dev *pdev = vdev->pdev; + int ret; + void __iomem *io; + + if (vdev->barmap[bar]) + return 0; + + ret = pci_request_selected_regions(pdev, 1 << bar, "vfio"); + if (ret) + return ret; + + io = pci_iomap(pdev, bar, 0); + if (!io) { + pci_release_selected_regions(pdev, 1 << bar); + return -ENOMEM; + } + + vdev->barmap[bar] = io; + + return 0; +} + +ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + struct pci_dev *pdev = vdev->pdev; + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + size_t x_start = 0, x_end = 0; + resource_size_t end; + void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; + ssize_t done; + + if (pci_resource_start(pdev, bar)) + end = pci_resource_len(pdev, bar); + else if (bar == PCI_ROM_RESOURCE && + pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW) + end = 0x20000; + else + return -EINVAL; + + if 
(pos >= end) + return -EINVAL; + + count = min(count, (size_t)(end - pos)); + + if (res->flags & IORESOURCE_MEM) { + down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + + if (bar == PCI_ROM_RESOURCE) { + /* + * The ROM can fill less space than the BAR, so we start the + * excluded range at the end of the actual ROM. This makes + * filling large ROM BARs much faster. + */ + io = pci_map_rom(pdev, &x_start); + if (!io) { + done = -ENOMEM; + goto out; + } + x_end = end; + } else { + int ret = vfio_pci_setup_barmap(vdev, bar); + if (ret) { + done = ret; + goto out; + } + + io = vdev->barmap[bar]; + } + + if (bar == vdev->msix_bar) { + x_start = vdev->msix_offset; + x_end = vdev->msix_offset + vdev->msix_size; + } + + done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite); + + if (done >= 0) + *ppos += done; + + if (bar == PCI_ROM_RESOURCE) + pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); + + return done; +} + +ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + int ret; + loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK; + void __iomem *iomem = NULL; + unsigned int rsrc; + bool is_ioport; + ssize_t done; + + if (!vdev->has_vga) + return -EINVAL; + + if (pos > 0xbfffful) + return -EINVAL; + + switch ((u32)pos) { + case 0xa0000 ... 0xbffff: + count = min(count, (size_t)(0xc0000 - pos)); + iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1); + off = pos - 0xa0000; + rsrc = VGA_RSRC_LEGACY_MEM; + is_ioport = false; + break; + case 0x3b0 ... 0x3bb: + count = min(count, (size_t)(0x3bc - pos)); + iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1); + off = pos - 0x3b0; + rsrc = VGA_RSRC_LEGACY_IO; + is_ioport = true; + break; + case 0x3c0 ... 0x3df: + count = min(count, (size_t)(0x3e0 - pos)); + iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1); + off = pos - 0x3c0; + rsrc = VGA_RSRC_LEGACY_IO; + is_ioport = true; + break; + default: + return -EINVAL; + } + + if (!iomem) + return -ENOMEM; + + ret = vga_get_interruptible(vdev->pdev, rsrc); + if (ret) { + is_ioport ? ioport_unmap(iomem) : iounmap(iomem); + return ret; + } + + done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite); + + vga_put(vdev->pdev, rsrc); + + is_ioport ? 
ioport_unmap(iomem) : iounmap(iomem); + + if (done >= 0) + *ppos += done; + + return done; +} + +static int vfio_pci_ioeventfd_handler(void *opaque, void *unused) +{ + struct vfio_pci_ioeventfd *ioeventfd = opaque; + + switch (ioeventfd->count) { + case 1: + vfio_iowrite8(ioeventfd->data, ioeventfd->addr); + break; + case 2: + vfio_iowrite16(ioeventfd->data, ioeventfd->addr); + break; + case 4: + vfio_iowrite32(ioeventfd->data, ioeventfd->addr); + break; +#ifdef iowrite64 + case 8: + vfio_iowrite64(ioeventfd->data, ioeventfd->addr); + break; +#endif + } + + return 0; +} + +long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, + uint64_t data, int count, int fd) +{ + struct pci_dev *pdev = vdev->pdev; + loff_t pos = offset & VFIO_PCI_OFFSET_MASK; + int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset); + struct vfio_pci_ioeventfd *ioeventfd; + + /* Only support ioeventfds into BARs */ + if (bar > VFIO_PCI_BAR5_REGION_INDEX) + return -EINVAL; + + if (pos + count > pci_resource_len(pdev, bar)) + return -EINVAL; + + /* Disallow ioeventfds working around MSI-X table writes */ + if (bar == vdev->msix_bar && + !(pos + count <= vdev->msix_offset || + pos >= vdev->msix_offset + vdev->msix_size)) + return -EINVAL; + +#ifndef iowrite64 + if (count == 8) + return -EINVAL; +#endif + + ret = vfio_pci_setup_barmap(vdev, bar); + if (ret) + return ret; + + mutex_lock(&vdev->ioeventfds_lock); + + list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) { + if (ioeventfd->pos == pos && ioeventfd->bar == bar && + ioeventfd->data == data && ioeventfd->count == count) { + if (fd == -1) { + vfio_virqfd_disable(&ioeventfd->virqfd); + list_del(&ioeventfd->next); + vdev->ioeventfds_nr--; + kfree(ioeventfd); + ret = 0; + } else + ret = -EEXIST; + + goto out_unlock; + } + } + + if (fd < 0) { + ret = -ENODEV; + goto out_unlock; + } + + if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) { + ret = -ENOSPC; + goto out_unlock; + } + + ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL); + if (!ioeventfd) { + ret = -ENOMEM; + goto out_unlock; + } + + ioeventfd->addr = vdev->barmap[bar] + pos; + ioeventfd->data = data; + ioeventfd->pos = pos; + ioeventfd->bar = bar; + ioeventfd->count = count; + + ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler, + NULL, NULL, &ioeventfd->virqfd, fd); + if (ret) { + kfree(ioeventfd); + goto out_unlock; + } + + list_add(&ioeventfd->next, &vdev->ioeventfds_list); + vdev->ioeventfds_nr++; + +out_unlock: + mutex_unlock(&vdev->ioeventfds_lock); + + return ret; +} |
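
Editor's note — the interrupt plumbing above (vfio_pci_set_irqs_ioctl() and the vfio_pci_set_*_trigger() helpers) is driven from userspace through the VFIO_DEVICE_SET_IRQS ioctl. The sketch below is a hedged illustration, not part of the patch: it wires one eventfd to MSI-X vector 0, which lands in vfio_pci_set_msi_trigger() and from there in vfio_msi_enable()/vfio_msi_set_block(). The helper name enable_msix_vector0 and the already-open device_fd (normally obtained via VFIO_GROUP_GET_DEVICE_FD) are assumptions; error handling is omitted.

#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical helper: device_fd is an already-open vfio-pci device fd. */
static int enable_msix_vector0(int device_fd)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)]
		__attribute__((aligned(8)));
	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
	int32_t efd = eventfd(0, EFD_CLOEXEC);

	irq_set->argsz = sizeof(buf);
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	irq_set->count = 1;
	memcpy(irq_set->data, &efd, sizeof(efd));

	/* Dispatched to vfio_pci_set_msi_trigger() in the patch above */
	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set))
		return -1;

	return efd;	/* read(efd, ...) blocks until the vector fires */
}

Handing the kernel an eventfd per vector is what lets the irq_bypass producer registration above short-circuit delivery into KVM when the device is assigned to a guest; a plain userspace driver simply poll()s or read()s the eventfd.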
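
Editor's note — BAR and VGA accesses reach vfio_pci_bar_rw()/do_io_rw() through reads and writes on the device file descriptor, with the region index encoded in the upper bits of the file offset (VFIO_PCI_OFFSET_SHIFT in vfio_pci_private.h). A minimal sketch, again not part of the patch, assuming an open device_fd and omitting error handling: userspace takes the offset from VFIO_DEVICE_GET_REGION_INFO rather than hard-coding the 40-bit shift, which is an internal implementation detail. The helper name read_bar0_reg is hypothetical.

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical helper: read a 32-bit register at offset "reg" inside BAR0. */
static uint32_t read_bar0_reg(int device_fd, uint64_t reg)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_BAR0_REGION_INDEX,
	};
	uint32_t val = 0;

	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);

	/* Serviced by vfio_pci_bar_rw() -> do_io_rw() in the patch above */
	pread(device_fd, &val, sizeof(val), info.offset + reg);

	return val;
}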
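
Editor's note — the ioeventfd support at the end of vfio_pci_rdwr.c is exposed through the VFIO_DEVICE_IOEVENTFD ioctl: signalling the registered eventfd makes vfio_pci_ioeventfd_handler() perform the pre-programmed MMIO write without another trip through pwrite(). The sketch below is illustrative only, under the same assumptions (open device_fd, offset taken from region info, no error handling); the helper name arm_doorbell is hypothetical and de-registration (fd = -1) is not shown.

#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical helper: bar_offset = region offset + doorbell register. */
static int arm_doorbell(int device_fd, uint64_t bar_offset, uint32_t data)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	struct vfio_device_ioeventfd ioeventfd = {
		.argsz = sizeof(ioeventfd),
		.flags = VFIO_DEVICE_IOEVENTFD_32,	/* 4-byte write */
		.offset = bar_offset,
		.data = data,
		.fd = efd,
	};

	/* Registered with vfio_pci_ioeventfd() in the patch above */
	ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd);

	return efd;	/* eventfd_write(efd, 1) now performs the MMIO write */
}

This is primarily useful to a VMM, which can hand the eventfd to KVM as an ioeventfd so that frequently written, data-insensitive doorbell registers never cause an exit to userspace.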