Diffstat (limited to 'drivers/vfio')
34 files changed, 14186 insertions, 0 deletions
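Before the per-file hunks, a brief orientation may help: the series below introduces the mediated-device (mdev) core, and the entry point for vendor drivers is mdev_register_device(), which binds a physical ("parent") device to a set of mdev_parent_ops (the create, remove, and supported_type_groups callbacks referenced in mdev_core.c and mdev_sysfs.c). The following is a minimal, hypothetical sketch of that registration, not part of this patch; the my_* names and the empty type-group list are placeholders, and any mdev_parent_ops fields beyond those exercised in this diff are assumed from the accompanying include/linux/mdev.h.

#include <linux/device.h>
#include <linux/mdev.h>

static int my_create(struct kobject *kobj, struct mdev_device *mdev)
{
	/* allocate per-instance state and make it reachable from later ops */
	mdev_set_drvdata(mdev, NULL);
	return 0;
}

static int my_remove(struct mdev_device *mdev)
{
	/* release whatever my_create() set up; a non-zero return can veto a
	 * non-forced remove from sysfs */
	return 0;
}

/* a real driver lists one named attribute_group per supported mdev type;
 * these populate <parent>/mdev_supported_types/ in sysfs */
static struct attribute_group *my_type_groups[] = {
	NULL,
};

static const struct mdev_parent_ops my_parent_ops = {
	.create			= my_create,
	.remove			= my_remove,
	.supported_type_groups	= my_type_groups,
};

/* called from the physical device's probe path */
static int my_register_parent(struct device *dev)
{
	return mdev_register_device(dev, &my_parent_ops);
}

/* and from its remove path */
static void my_unregister_parent(struct device *dev)
{
	mdev_unregister_device(dev);
}

With the parent registered, userspace creates instances by writing a UUID to the type's "create" attribute and removes them via the per-device "remove" attribute, as implemented in mdev_sysfs.c below.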
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig new file mode 100644 index 000000000..b7765271d --- /dev/null +++ b/drivers/vfio/Kconfig @@ -0,0 +1,50 @@ +config VFIO_IOMMU_TYPE1 + tristate + depends on VFIO + default n + +config VFIO_IOMMU_SPAPR_TCE + tristate + depends on VFIO && SPAPR_TCE_IOMMU + default VFIO + +config VFIO_SPAPR_EEH + tristate + depends on EEH && VFIO_IOMMU_SPAPR_TCE + default VFIO + +config VFIO_VIRQFD + tristate + depends on VFIO && EVENTFD + default n + +menuconfig VFIO + tristate "VFIO Non-Privileged userspace driver framework" + depends on IOMMU_API + select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM_SMMU || ARM_SMMU_V3) + select ANON_INODES + help + VFIO provides a framework for secure userspace device drivers. + See Documentation/vfio.txt for more details. + + If you don't know what to do here, say N. + +config VFIO_NOIOMMU + bool "VFIO No-IOMMU support" + depends on VFIO + help + VFIO is built on the ability to isolate devices using the IOMMU. + Only with an IOMMU can userspace access to DMA capable devices be + considered secure. VFIO No-IOMMU mode enables IOMMU groups for + devices without IOMMU backing for the purpose of re-using the VFIO + infrastructure in a non-secure mode. Use of this mode will result + in an unsupportable kernel and will therefore taint the kernel. + Device assignment to virtual machines is also not possible with + this mode since there is no IOMMU to provide DMA translation. + + If you don't know what to do here, say N. + +source "drivers/vfio/pci/Kconfig" +source "drivers/vfio/platform/Kconfig" +source "drivers/vfio/mdev/Kconfig" +source "virt/lib/Kconfig" diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile new file mode 100644 index 000000000..de67c4725 --- /dev/null +++ b/drivers/vfio/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +vfio_virqfd-y := virqfd.o + +obj-$(CONFIG_VFIO) += vfio.o +obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o +obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o +obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o +obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o +obj-$(CONFIG_VFIO_PCI) += pci/ +obj-$(CONFIG_VFIO_PLATFORM) += platform/ +obj-$(CONFIG_VFIO_MDEV) += mdev/ diff --git a/drivers/vfio/mdev/Kconfig b/drivers/vfio/mdev/Kconfig new file mode 100644 index 000000000..14fdb106a --- /dev/null +++ b/drivers/vfio/mdev/Kconfig @@ -0,0 +1,17 @@ + +config VFIO_MDEV + tristate "Mediated device driver framework" + depends on VFIO + default n + help + Provides a framework to virtualize devices. + See Documentation/vfio-mediated-device.txt for more details. + + If you don't know what do here, say N. + +config VFIO_MDEV_DEVICE + tristate "VFIO driver for Mediated devices" + depends on VFIO && VFIO_MDEV + default n + help + VFIO based driver for Mediated devices. diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile new file mode 100644 index 000000000..fa2d5ea46 --- /dev/null +++ b/drivers/vfio/mdev/Makefile @@ -0,0 +1,5 @@ + +mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o + +obj-$(CONFIG_VFIO_MDEV) += mdev.o +obj-$(CONFIG_VFIO_MDEV_DEVICE) += vfio_mdev.o diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c new file mode 100644 index 000000000..e052f62fd --- /dev/null +++ b/drivers/vfio/mdev/mdev_core.c @@ -0,0 +1,412 @@ +/* + * Mediated device Core Driver + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * Author: Neo Jia <cjia@nvidia.com> + * Kirti Wankhede <kwankhede@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uuid.h> +#include <linux/sysfs.h> +#include <linux/mdev.h> + +#include "mdev_private.h" + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "NVIDIA Corporation" +#define DRIVER_DESC "Mediated device Core Driver" + +static LIST_HEAD(parent_list); +static DEFINE_MUTEX(parent_list_lock); +static struct class_compat *mdev_bus_compat_class; + +static LIST_HEAD(mdev_list); +static DEFINE_MUTEX(mdev_list_lock); + +struct device *mdev_parent_dev(struct mdev_device *mdev) +{ + return mdev->parent->dev; +} +EXPORT_SYMBOL(mdev_parent_dev); + +void *mdev_get_drvdata(struct mdev_device *mdev) +{ + return mdev->driver_data; +} +EXPORT_SYMBOL(mdev_get_drvdata); + +void mdev_set_drvdata(struct mdev_device *mdev, void *data) +{ + mdev->driver_data = data; +} +EXPORT_SYMBOL(mdev_set_drvdata); + +struct device *mdev_dev(struct mdev_device *mdev) +{ + return &mdev->dev; +} +EXPORT_SYMBOL(mdev_dev); + +struct mdev_device *mdev_from_dev(struct device *dev) +{ + return dev_is_mdev(dev) ? to_mdev_device(dev) : NULL; +} +EXPORT_SYMBOL(mdev_from_dev); + +uuid_le mdev_uuid(struct mdev_device *mdev) +{ + return mdev->uuid; +} +EXPORT_SYMBOL(mdev_uuid); + +/* Should be called holding parent_list_lock */ +static struct mdev_parent *__find_parent_device(struct device *dev) +{ + struct mdev_parent *parent; + + list_for_each_entry(parent, &parent_list, next) { + if (parent->dev == dev) + return parent; + } + return NULL; +} + +static void mdev_release_parent(struct kref *kref) +{ + struct mdev_parent *parent = container_of(kref, struct mdev_parent, + ref); + struct device *dev = parent->dev; + + kfree(parent); + put_device(dev); +} + +static +inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent) +{ + if (parent) + kref_get(&parent->ref); + + return parent; +} + +static inline void mdev_put_parent(struct mdev_parent *parent) +{ + if (parent) + kref_put(&parent->ref, mdev_release_parent); +} + +static int mdev_device_create_ops(struct kobject *kobj, + struct mdev_device *mdev) +{ + struct mdev_parent *parent = mdev->parent; + int ret; + + ret = parent->ops->create(kobj, mdev); + if (ret) + return ret; + + ret = sysfs_create_groups(&mdev->dev.kobj, + parent->ops->mdev_attr_groups); + if (ret) + parent->ops->remove(mdev); + + return ret; +} + +/* + * mdev_device_remove_ops gets called from sysfs's 'remove' and when parent + * device is being unregistered from mdev device framework. + * - 'force_remove' is set to 'false' when called from sysfs's 'remove' which + * indicates that if the mdev device is active, used by VMM or userspace + * application, vendor driver could return error then don't remove the device. + * - 'force_remove' is set to 'true' when called from mdev_unregister_device() + * which indicate that parent device is being removed from mdev device + * framework so remove mdev device forcefully. + */ +static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove) +{ + struct mdev_parent *parent = mdev->parent; + int ret; + + /* + * Vendor driver can return error if VMM or userspace application is + * using this mdev device. 
+ */ + ret = parent->ops->remove(mdev); + if (ret && !force_remove) + return -EBUSY; + + sysfs_remove_groups(&mdev->dev.kobj, parent->ops->mdev_attr_groups); + return 0; +} + +static int mdev_device_remove_cb(struct device *dev, void *data) +{ + if (dev_is_mdev(dev)) + mdev_device_remove(dev, true); + + return 0; +} + +/* + * mdev_register_device : Register a device + * @dev: device structure representing parent device. + * @ops: Parent device operation structure to be registered. + * + * Add device to list of registered parent devices. + * Returns a negative value on error, otherwise 0. + */ +int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) +{ + int ret; + struct mdev_parent *parent; + + /* check for mandatory ops */ + if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups) + return -EINVAL; + + dev = get_device(dev); + if (!dev) + return -EINVAL; + + mutex_lock(&parent_list_lock); + + /* Check for duplicate */ + parent = __find_parent_device(dev); + if (parent) { + parent = NULL; + ret = -EEXIST; + goto add_dev_err; + } + + parent = kzalloc(sizeof(*parent), GFP_KERNEL); + if (!parent) { + ret = -ENOMEM; + goto add_dev_err; + } + + kref_init(&parent->ref); + + parent->dev = dev; + parent->ops = ops; + + if (!mdev_bus_compat_class) { + mdev_bus_compat_class = class_compat_register("mdev_bus"); + if (!mdev_bus_compat_class) { + ret = -ENOMEM; + goto add_dev_err; + } + } + + ret = parent_create_sysfs_files(parent); + if (ret) + goto add_dev_err; + + ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL); + if (ret) + dev_warn(dev, "Failed to create compatibility class link\n"); + + list_add(&parent->next, &parent_list); + mutex_unlock(&parent_list_lock); + + dev_info(dev, "MDEV: Registered\n"); + return 0; + +add_dev_err: + mutex_unlock(&parent_list_lock); + if (parent) + mdev_put_parent(parent); + else + put_device(dev); + return ret; +} +EXPORT_SYMBOL(mdev_register_device); + +/* + * mdev_unregister_device : Unregister a parent device + * @dev: device structure representing parent device. + * + * Remove device from list of registered parent devices. Give a chance to free + * existing mediated devices for given device. 
+ */ + +void mdev_unregister_device(struct device *dev) +{ + struct mdev_parent *parent; + + mutex_lock(&parent_list_lock); + parent = __find_parent_device(dev); + + if (!parent) { + mutex_unlock(&parent_list_lock); + return; + } + dev_info(dev, "MDEV: Unregistering\n"); + + list_del(&parent->next); + class_compat_remove_link(mdev_bus_compat_class, dev, NULL); + + device_for_each_child(dev, NULL, mdev_device_remove_cb); + + parent_remove_sysfs_files(parent); + + mutex_unlock(&parent_list_lock); + mdev_put_parent(parent); +} +EXPORT_SYMBOL(mdev_unregister_device); + +static void mdev_device_release(struct device *dev) +{ + struct mdev_device *mdev = to_mdev_device(dev); + + mutex_lock(&mdev_list_lock); + list_del(&mdev->next); + mutex_unlock(&mdev_list_lock); + + dev_dbg(&mdev->dev, "MDEV: destroying\n"); + kfree(mdev); +} + +int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) +{ + int ret; + struct mdev_device *mdev, *tmp; + struct mdev_parent *parent; + struct mdev_type *type = to_mdev_type(kobj); + + parent = mdev_get_parent(type->parent); + if (!parent) + return -EINVAL; + + mutex_lock(&mdev_list_lock); + + /* Check for duplicate */ + list_for_each_entry(tmp, &mdev_list, next) { + if (!uuid_le_cmp(tmp->uuid, uuid)) { + mutex_unlock(&mdev_list_lock); + ret = -EEXIST; + goto mdev_fail; + } + } + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) { + mutex_unlock(&mdev_list_lock); + ret = -ENOMEM; + goto mdev_fail; + } + + memcpy(&mdev->uuid, &uuid, sizeof(uuid_le)); + list_add(&mdev->next, &mdev_list); + mutex_unlock(&mdev_list_lock); + + mdev->parent = parent; + kref_init(&mdev->ref); + + mdev->dev.parent = dev; + mdev->dev.bus = &mdev_bus_type; + mdev->dev.release = mdev_device_release; + dev_set_name(&mdev->dev, "%pUl", uuid.b); + + ret = device_register(&mdev->dev); + if (ret) { + put_device(&mdev->dev); + goto mdev_fail; + } + + ret = mdev_device_create_ops(kobj, mdev); + if (ret) + goto create_fail; + + ret = mdev_create_sysfs_files(&mdev->dev, type); + if (ret) { + mdev_device_remove_ops(mdev, true); + goto create_fail; + } + + mdev->type_kobj = kobj; + mdev->active = true; + dev_dbg(&mdev->dev, "MDEV: created\n"); + + return 0; + +create_fail: + device_unregister(&mdev->dev); +mdev_fail: + mdev_put_parent(parent); + return ret; +} + +int mdev_device_remove(struct device *dev, bool force_remove) +{ + struct mdev_device *mdev, *tmp; + struct mdev_parent *parent; + struct mdev_type *type; + int ret; + + mdev = to_mdev_device(dev); + + mutex_lock(&mdev_list_lock); + list_for_each_entry(tmp, &mdev_list, next) { + if (tmp == mdev) + break; + } + + if (tmp != mdev) { + mutex_unlock(&mdev_list_lock); + return -ENODEV; + } + + if (!mdev->active) { + mutex_unlock(&mdev_list_lock); + return -EAGAIN; + } + + mdev->active = false; + mutex_unlock(&mdev_list_lock); + + type = to_mdev_type(mdev->type_kobj); + parent = mdev->parent; + + ret = mdev_device_remove_ops(mdev, force_remove); + if (ret) { + mdev->active = true; + return ret; + } + + mdev_remove_sysfs_files(dev, type); + device_unregister(dev); + mdev_put_parent(parent); + + return 0; +} + +static int __init mdev_init(void) +{ + return mdev_bus_register(); +} + +static void __exit mdev_exit(void) +{ + if (mdev_bus_compat_class) + class_compat_unregister(mdev_bus_compat_class); + + mdev_bus_unregister(); +} + +module_init(mdev_init) +module_exit(mdev_exit) + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_SOFTDEP("post: 
vfio_mdev"); diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c new file mode 100644 index 000000000..6f0391f6f --- /dev/null +++ b/drivers/vfio/mdev/mdev_driver.c @@ -0,0 +1,119 @@ +/* + * MDEV driver + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia <cjia@nvidia.com> + * Kirti Wankhede <kwankhede@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/device.h> +#include <linux/iommu.h> +#include <linux/mdev.h> + +#include "mdev_private.h" + +static int mdev_attach_iommu(struct mdev_device *mdev) +{ + int ret; + struct iommu_group *group; + + group = iommu_group_alloc(); + if (IS_ERR(group)) + return PTR_ERR(group); + + ret = iommu_group_add_device(group, &mdev->dev); + if (!ret) + dev_info(&mdev->dev, "MDEV: group_id = %d\n", + iommu_group_id(group)); + + iommu_group_put(group); + return ret; +} + +static void mdev_detach_iommu(struct mdev_device *mdev) +{ + iommu_group_remove_device(&mdev->dev); + dev_info(&mdev->dev, "MDEV: detaching iommu\n"); +} + +static int mdev_probe(struct device *dev) +{ + struct mdev_driver *drv = to_mdev_driver(dev->driver); + struct mdev_device *mdev = to_mdev_device(dev); + int ret; + + ret = mdev_attach_iommu(mdev); + if (ret) + return ret; + + if (drv && drv->probe) { + ret = drv->probe(dev); + if (ret) + mdev_detach_iommu(mdev); + } + + return ret; +} + +static int mdev_remove(struct device *dev) +{ + struct mdev_driver *drv = to_mdev_driver(dev->driver); + struct mdev_device *mdev = to_mdev_device(dev); + + if (drv && drv->remove) + drv->remove(dev); + + mdev_detach_iommu(mdev); + + return 0; +} + +struct bus_type mdev_bus_type = { + .name = "mdev", + .probe = mdev_probe, + .remove = mdev_remove, +}; +EXPORT_SYMBOL_GPL(mdev_bus_type); + +/** + * mdev_register_driver - register a new MDEV driver + * @drv: the driver to register + * @owner: module owner of driver to be registered + * + * Returns a negative value on error, otherwise 0. + **/ +int mdev_register_driver(struct mdev_driver *drv, struct module *owner) +{ + /* initialize common driver fields */ + drv->driver.name = drv->name; + drv->driver.bus = &mdev_bus_type; + drv->driver.owner = owner; + + /* register with core */ + return driver_register(&drv->driver); +} +EXPORT_SYMBOL(mdev_register_driver); + +/* + * mdev_unregister_driver - unregister MDEV driver + * @drv: the driver to unregister + */ +void mdev_unregister_driver(struct mdev_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(mdev_unregister_driver); + +int mdev_bus_register(void) +{ + return bus_register(&mdev_bus_type); +} + +void mdev_bus_unregister(void) +{ + bus_unregister(&mdev_bus_type); +} diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h new file mode 100644 index 000000000..b5819b7d7 --- /dev/null +++ b/drivers/vfio/mdev/mdev_private.h @@ -0,0 +1,64 @@ +/* + * Mediated device interal definitions + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia <cjia@nvidia.com> + * Kirti Wankhede <kwankhede@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef MDEV_PRIVATE_H +#define MDEV_PRIVATE_H + +int mdev_bus_register(void); +void mdev_bus_unregister(void); + +struct mdev_parent { + struct device *dev; + const struct mdev_parent_ops *ops; + struct kref ref; + struct list_head next; + struct kset *mdev_types_kset; + struct list_head type_list; +}; + +struct mdev_device { + struct device dev; + struct mdev_parent *parent; + uuid_le uuid; + void *driver_data; + struct kref ref; + struct list_head next; + struct kobject *type_kobj; + bool active; +}; + +#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev) +#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type) + +struct mdev_type { + struct kobject kobj; + struct kobject *devices_kobj; + struct mdev_parent *parent; + struct list_head next; + struct attribute_group *group; +}; + +#define to_mdev_type_attr(_attr) \ + container_of(_attr, struct mdev_type_attribute, attr) +#define to_mdev_type(_kobj) \ + container_of(_kobj, struct mdev_type, kobj) + +int parent_create_sysfs_files(struct mdev_parent *parent); +void parent_remove_sysfs_files(struct mdev_parent *parent); + +int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type); +void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type); + +int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid); +int mdev_device_remove(struct device *dev, bool force_remove); + +#endif /* MDEV_PRIVATE_H */ diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c new file mode 100644 index 000000000..c99fcc6c2 --- /dev/null +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -0,0 +1,286 @@ +/* + * File attributes for Mediated devices + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia <cjia@nvidia.com> + * Kirti Wankhede <kwankhede@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/sysfs.h> +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uuid.h> +#include <linux/mdev.h> + +#include "mdev_private.h" + +/* Static functions */ + +static ssize_t mdev_type_attr_show(struct kobject *kobj, + struct attribute *__attr, char *buf) +{ + struct mdev_type_attribute *attr = to_mdev_type_attr(__attr); + struct mdev_type *type = to_mdev_type(kobj); + ssize_t ret = -EIO; + + if (attr->show) + ret = attr->show(kobj, type->parent->dev, buf); + return ret; +} + +static ssize_t mdev_type_attr_store(struct kobject *kobj, + struct attribute *__attr, + const char *buf, size_t count) +{ + struct mdev_type_attribute *attr = to_mdev_type_attr(__attr); + struct mdev_type *type = to_mdev_type(kobj); + ssize_t ret = -EIO; + + if (attr->store) + ret = attr->store(&type->kobj, type->parent->dev, buf, count); + return ret; +} + +static const struct sysfs_ops mdev_type_sysfs_ops = { + .show = mdev_type_attr_show, + .store = mdev_type_attr_store, +}; + +static ssize_t create_store(struct kobject *kobj, struct device *dev, + const char *buf, size_t count) +{ + char *str; + uuid_le uuid; + int ret; + + if ((count < UUID_STRING_LEN) || (count > UUID_STRING_LEN + 1)) + return -EINVAL; + + str = kstrndup(buf, count, GFP_KERNEL); + if (!str) + return -ENOMEM; + + ret = uuid_le_to_bin(str, &uuid); + kfree(str); + if (ret) + return ret; + + ret = mdev_device_create(kobj, dev, uuid); + if (ret) + return ret; + + return count; +} + +MDEV_TYPE_ATTR_WO(create); + +static void mdev_type_release(struct kobject *kobj) +{ + struct mdev_type *type = to_mdev_type(kobj); + + pr_debug("Releasing group %s\n", kobj->name); + kfree(type); +} + +static struct kobj_type mdev_type_ktype = { + .sysfs_ops = &mdev_type_sysfs_ops, + .release = mdev_type_release, +}; + +struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, + struct attribute_group *group) +{ + struct mdev_type *type; + int ret; + + if (!group->name) { + pr_err("%s: Type name empty!\n", __func__); + return ERR_PTR(-EINVAL); + } + + type = kzalloc(sizeof(*type), GFP_KERNEL); + if (!type) + return ERR_PTR(-ENOMEM); + + type->kobj.kset = parent->mdev_types_kset; + type->parent = parent; + + ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL, + "%s-%s", dev_driver_string(parent->dev), + group->name); + if (ret) { + kobject_put(&type->kobj); + return ERR_PTR(ret); + } + + ret = sysfs_create_file(&type->kobj, &mdev_type_attr_create.attr); + if (ret) + goto attr_create_failed; + + type->devices_kobj = kobject_create_and_add("devices", &type->kobj); + if (!type->devices_kobj) { + ret = -ENOMEM; + goto attr_devices_failed; + } + + ret = sysfs_create_files(&type->kobj, + (const struct attribute **)group->attrs); + if (ret) { + ret = -ENOMEM; + goto attrs_failed; + } + + type->group = group; + return type; + +attrs_failed: + kobject_put(type->devices_kobj); +attr_devices_failed: + sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr); +attr_create_failed: + kobject_del(&type->kobj); + kobject_put(&type->kobj); + return ERR_PTR(ret); +} + +static void remove_mdev_supported_type(struct mdev_type *type) +{ + sysfs_remove_files(&type->kobj, + (const struct attribute **)type->group->attrs); + kobject_put(type->devices_kobj); + sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr); + kobject_del(&type->kobj); + kobject_put(&type->kobj); +} + +static int add_mdev_supported_type_groups(struct mdev_parent *parent) +{ + int i; + + for (i = 0; 
parent->ops->supported_type_groups[i]; i++) { + struct mdev_type *type; + + type = add_mdev_supported_type(parent, + parent->ops->supported_type_groups[i]); + if (IS_ERR(type)) { + struct mdev_type *ltype, *tmp; + + list_for_each_entry_safe(ltype, tmp, &parent->type_list, + next) { + list_del(<ype->next); + remove_mdev_supported_type(ltype); + } + return PTR_ERR(type); + } + list_add(&type->next, &parent->type_list); + } + return 0; +} + +/* mdev sysfs functions */ +void parent_remove_sysfs_files(struct mdev_parent *parent) +{ + struct mdev_type *type, *tmp; + + list_for_each_entry_safe(type, tmp, &parent->type_list, next) { + list_del(&type->next); + remove_mdev_supported_type(type); + } + + sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups); + kset_unregister(parent->mdev_types_kset); +} + +int parent_create_sysfs_files(struct mdev_parent *parent) +{ + int ret; + + parent->mdev_types_kset = kset_create_and_add("mdev_supported_types", + NULL, &parent->dev->kobj); + + if (!parent->mdev_types_kset) + return -ENOMEM; + + INIT_LIST_HEAD(&parent->type_list); + + ret = sysfs_create_groups(&parent->dev->kobj, + parent->ops->dev_attr_groups); + if (ret) + goto create_err; + + ret = add_mdev_supported_type_groups(parent); + if (ret) + sysfs_remove_groups(&parent->dev->kobj, + parent->ops->dev_attr_groups); + else + return ret; + +create_err: + kset_unregister(parent->mdev_types_kset); + return ret; +} + +static ssize_t remove_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (val && device_remove_file_self(dev, attr)) { + int ret; + + ret = mdev_device_remove(dev, false); + if (ret) { + device_create_file(dev, attr); + return ret; + } + } + + return count; +} + +static DEVICE_ATTR_WO(remove); + +static const struct attribute *mdev_device_attrs[] = { + &dev_attr_remove.attr, + NULL, +}; + +int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type) +{ + int ret; + + ret = sysfs_create_link(type->devices_kobj, &dev->kobj, dev_name(dev)); + if (ret) + return ret; + + ret = sysfs_create_link(&dev->kobj, &type->kobj, "mdev_type"); + if (ret) + goto type_link_failed; + + ret = sysfs_create_files(&dev->kobj, mdev_device_attrs); + if (ret) + goto create_files_failed; + + return ret; + +create_files_failed: + sysfs_remove_link(&dev->kobj, "mdev_type"); +type_link_failed: + sysfs_remove_link(type->devices_kobj, dev_name(dev)); + return ret; +} + +void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type) +{ + sysfs_remove_files(&dev->kobj, mdev_device_attrs); + sysfs_remove_link(&dev->kobj, "mdev_type"); + sysfs_remove_link(type->devices_kobj, dev_name(dev)); +} diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c new file mode 100644 index 000000000..d230620fe --- /dev/null +++ b/drivers/vfio/mdev/vfio_mdev.c @@ -0,0 +1,148 @@ +/* + * VFIO based driver for Mediated device + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia <cjia@nvidia.com> + * Kirti Wankhede <kwankhede@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/vfio.h> +#include <linux/mdev.h> + +#include "mdev_private.h" + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "NVIDIA Corporation" +#define DRIVER_DESC "VFIO based driver for Mediated device" + +static int vfio_mdev_open(void *device_data) +{ + struct mdev_device *mdev = device_data; + struct mdev_parent *parent = mdev->parent; + int ret; + + if (unlikely(!parent->ops->open)) + return -EINVAL; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + ret = parent->ops->open(mdev); + if (ret) + module_put(THIS_MODULE); + + return ret; +} + +static void vfio_mdev_release(void *device_data) +{ + struct mdev_device *mdev = device_data; + struct mdev_parent *parent = mdev->parent; + + if (likely(parent->ops->release)) + parent->ops->release(mdev); + + module_put(THIS_MODULE); +} + +static long vfio_mdev_unlocked_ioctl(void *device_data, + unsigned int cmd, unsigned long arg) +{ + struct mdev_device *mdev = device_data; + struct mdev_parent *parent = mdev->parent; + + if (unlikely(!parent->ops->ioctl)) + return -EINVAL; + + return parent->ops->ioctl(mdev, cmd, arg); +} + +static ssize_t vfio_mdev_read(void *device_data, char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = device_data; + struct mdev_parent *parent = mdev->parent; + + if (unlikely(!parent->ops->read)) + return -EINVAL; + + return parent->ops->read(mdev, buf, count, ppos); +} + +static ssize_t vfio_mdev_write(void *device_data, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = device_data; + struct mdev_parent *parent = mdev->parent; + + if (unlikely(!parent->ops->write)) + return -EINVAL; + + return parent->ops->write(mdev, buf, count, ppos); +} + +static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma) +{ + struct mdev_device *mdev = device_data; + struct mdev_parent *parent = mdev->parent; + + if (unlikely(!parent->ops->mmap)) + return -EINVAL; + + return parent->ops->mmap(mdev, vma); +} + +static const struct vfio_device_ops vfio_mdev_dev_ops = { + .name = "vfio-mdev", + .open = vfio_mdev_open, + .release = vfio_mdev_release, + .ioctl = vfio_mdev_unlocked_ioctl, + .read = vfio_mdev_read, + .write = vfio_mdev_write, + .mmap = vfio_mdev_mmap, +}; + +static int vfio_mdev_probe(struct device *dev) +{ + struct mdev_device *mdev = to_mdev_device(dev); + + return vfio_add_group_dev(dev, &vfio_mdev_dev_ops, mdev); +} + +static void vfio_mdev_remove(struct device *dev) +{ + vfio_del_group_dev(dev); +} + +static struct mdev_driver vfio_mdev_driver = { + .name = "vfio_mdev", + .probe = vfio_mdev_probe, + .remove = vfio_mdev_remove, +}; + +static int __init vfio_mdev_init(void) +{ + return mdev_register_driver(&vfio_mdev_driver, THIS_MODULE); +} + +static void __exit vfio_mdev_exit(void) +{ + mdev_unregister_driver(&vfio_mdev_driver); +} + +module_init(vfio_mdev_init) +module_exit(vfio_mdev_exit) + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig new file mode 100644 index 000000000..fcbfd0aac --- /dev/null +++ b/drivers/vfio/pci/Kconfig @@ -0,0 +1,41 @@ +config VFIO_PCI + tristate "VFIO support for PCI devices" + depends on VFIO && PCI && EVENTFD + depends on MMU + select VFIO_VIRQFD + select IRQ_BYPASS_MANAGER + help + Support for the PCI VFIO bus driver. 
This is required to make + use of PCI drivers using the VFIO framework. + + If you don't know what to do here, say N. + +config VFIO_PCI_VGA + bool "VFIO PCI support for VGA devices" + depends on VFIO_PCI && X86 && VGA_ARB + help + Support for VGA extension to VFIO PCI. This exposes an additional + region on VGA devices for accessing legacy VGA addresses used by + BIOS and generic video drivers. + + If you don't know what to do here, say N. + +config VFIO_PCI_MMAP + depends on VFIO_PCI + def_bool y if !S390 + +config VFIO_PCI_INTX + depends on VFIO_PCI + def_bool y if !S390 + +config VFIO_PCI_IGD + bool "VFIO PCI extensions for Intel graphics (GVT-d)" + depends on VFIO_PCI && X86 + default y + help + Support for Intel IGD specific extensions to enable direct + assignment to virtual machines. This includes exposing an IGD + specific firmware table and read-only copies of the host bridge + and LPC bridge config space. + + To enable Intel IGD assignment through vfio-pci, say Y. diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile new file mode 100644 index 000000000..76d8ec058 --- /dev/null +++ b/drivers/vfio/pci/Makefile @@ -0,0 +1,5 @@ + +vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o +vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o + +obj-$(CONFIG_VFIO_PCI) += vfio-pci.o diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c new file mode 100644 index 000000000..51b791c75 --- /dev/null +++ b/drivers/vfio/pci/vfio_pci.c @@ -0,0 +1,1829 @@ +/* + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/eventfd.h> +#include <linux/file.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/vgaarb.h> +#include <linux/nospec.h> +#include <linux/sched/mm.h> + +#include "vfio_pci_private.h" + +#define DRIVER_VERSION "0.2" +#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" +#define DRIVER_DESC "VFIO PCI - User Level meta-driver" + +static char ids[1024] __initdata; +module_param_string(ids, ids, sizeof(ids), 0); +MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified"); + +static bool nointxmask; +module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(nointxmask, + "Disable support for PCI 2.3 style INTx masking. 
If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag."); + +#ifdef CONFIG_VFIO_PCI_VGA +static bool disable_vga; +module_param(disable_vga, bool, S_IRUGO); +MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci"); +#endif + +static bool disable_idle_d3; +module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(disable_idle_d3, + "Disable using the PCI D3 low power state for idle, unused devices"); + +static DEFINE_MUTEX(driver_lock); + +static inline bool vfio_vga_disabled(void) +{ +#ifdef CONFIG_VFIO_PCI_VGA + return disable_vga; +#else + return true; +#endif +} + +/* + * Our VGA arbiter participation is limited since we don't know anything + * about the device itself. However, if the device is the only VGA device + * downstream of a bridge and VFIO VGA support is disabled, then we can + * safely return legacy VGA IO and memory as not decoded since the user + * has no way to get to it and routing can be disabled externally at the + * bridge. + */ +static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga) +{ + struct vfio_pci_device *vdev = opaque; + struct pci_dev *tmp = NULL, *pdev = vdev->pdev; + unsigned char max_busnr; + unsigned int decodes; + + if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus)) + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM | + VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + + max_busnr = pci_bus_max_busnr(pdev->bus); + decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + + while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) { + if (tmp == pdev || + pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) || + pci_is_root_bus(tmp->bus)) + continue; + + if (tmp->bus->number >= pdev->bus->number && + tmp->bus->number <= max_busnr) { + pci_dev_put(tmp); + decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + break; + } + } + + return decodes; +} + +static inline bool vfio_pci_is_vga(struct pci_dev *pdev) +{ + return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; +} + +static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) +{ + struct resource *res; + int bar; + struct vfio_pci_dummy_resource *dummy_res; + + for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { + res = vdev->pdev->resource + bar; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP)) + goto no_mmap; + + if (!(res->flags & IORESOURCE_MEM)) + goto no_mmap; + + /* + * The PCI core shouldn't set up a resource with a + * type but zero size. But there may be bugs that + * cause us to do that. + */ + if (!resource_size(res)) + goto no_mmap; + + if (resource_size(res) >= PAGE_SIZE) { + vdev->bar_mmap_supported[bar] = true; + continue; + } + + if (!(res->start & ~PAGE_MASK)) { + /* + * Add a dummy resource to reserve the remainder + * of the exclusive page in case that hot-add + * device's bar is assigned into it. 
+ */ + dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL); + if (dummy_res == NULL) + goto no_mmap; + + dummy_res->resource.name = "vfio sub-page reserved"; + dummy_res->resource.start = res->end + 1; + dummy_res->resource.end = res->start + PAGE_SIZE - 1; + dummy_res->resource.flags = res->flags; + if (request_resource(res->parent, + &dummy_res->resource)) { + kfree(dummy_res); + goto no_mmap; + } + dummy_res->index = bar; + list_add(&dummy_res->res_next, + &vdev->dummy_resources_list); + vdev->bar_mmap_supported[bar] = true; + continue; + } + /* + * Here we don't handle the case when the BAR is not page + * aligned because we can't expect the BAR will be + * assigned into the same location in a page in guest + * when we passthrough the BAR. And it's hard to access + * this BAR in userspace because we have no way to get + * the BAR's location in a page. + */ +no_mmap: + vdev->bar_mmap_supported[bar] = false; + } +} + +static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); +static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); + +/* + * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND + * _and_ the ability detect when the device is asserting INTx via PCI_STATUS. + * If a device implements the former but not the latter we would typically + * expect broken_intx_masking be set and require an exclusive interrupt. + * However since we do have control of the device's ability to assert INTx, + * we can instead pretend that the device does not implement INTx, virtualizing + * the pin register to report zero and maintaining DisINTx set on the host. + */ +static bool vfio_pci_nointx(struct pci_dev *pdev) +{ + switch (pdev->vendor) { + case PCI_VENDOR_ID_INTEL: + switch (pdev->device) { + /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */ + case 0x1572: + case 0x1574: + case 0x1580 ... 0x1581: + case 0x1583 ... 0x158b: + case 0x37d0 ... 
0x37d2: + return true; + default: + return false; + } + } + + return false; +} + +static int vfio_pci_enable(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + int ret; + u16 cmd; + u8 msix_pos; + + pci_set_power_state(pdev, PCI_D0); + + /* Don't allow our initial saved state to include busmaster */ + pci_clear_master(pdev); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + /* If reset fails because of the device lock, fail this path entirely */ + ret = pci_try_reset_function(pdev); + if (ret == -EAGAIN) { + pci_disable_device(pdev); + return ret; + } + + vdev->reset_works = !ret; + pci_save_state(pdev); + vdev->pci_saved_state = pci_store_saved_state(pdev); + if (!vdev->pci_saved_state) + pr_debug("%s: Couldn't store %s saved state\n", + __func__, dev_name(&pdev->dev)); + + if (likely(!nointxmask)) { + if (vfio_pci_nointx(pdev)) { + dev_info(&pdev->dev, "Masking broken INTx support\n"); + vdev->nointx = true; + pci_intx(pdev, 0); + } else + vdev->pci_2_3 = pci_intx_mask_supported(pdev); + } + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) { + cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } + + ret = vfio_config_init(vdev); + if (ret) { + kfree(vdev->pci_saved_state); + vdev->pci_saved_state = NULL; + pci_disable_device(pdev); + return ret; + } + + msix_pos = pdev->msix_cap; + if (msix_pos) { + u16 flags; + u32 table; + + pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags); + pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table); + + vdev->msix_bar = table & PCI_MSIX_TABLE_BIR; + vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET; + vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16; + } else + vdev->msix_bar = 0xFF; + + if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev)) + vdev->has_vga = true; + + + if (vfio_pci_is_vga(pdev) && + pdev->vendor == PCI_VENDOR_ID_INTEL && + IS_ENABLED(CONFIG_VFIO_PCI_IGD)) { + ret = vfio_pci_igd_init(vdev); + if (ret) { + dev_warn(&vdev->pdev->dev, + "Failed to setup Intel IGD regions\n"); + vfio_pci_disable(vdev); + return ret; + } + } + + vfio_pci_probe_mmaps(vdev); + + return 0; +} + +static void vfio_pci_disable(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_dummy_resource *dummy_res, *tmp; + struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp; + int i, bar; + + /* Stop the device from further DMA */ + pci_clear_master(pdev); + + vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE | + VFIO_IRQ_SET_ACTION_TRIGGER, + vdev->irq_type, 0, 0, NULL); + + /* Device closed, don't need mutex here */ + list_for_each_entry_safe(ioeventfd, ioeventfd_tmp, + &vdev->ioeventfds_list, next) { + vfio_virqfd_disable(&ioeventfd->virqfd); + list_del(&ioeventfd->next); + kfree(ioeventfd); + } + vdev->ioeventfds_nr = 0; + + vdev->virq_disabled = false; + + for (i = 0; i < vdev->num_regions; i++) + vdev->region[i].ops->release(vdev, &vdev->region[i]); + + vdev->num_regions = 0; + kfree(vdev->region); + vdev->region = NULL; /* don't krealloc a freed pointer */ + + vfio_config_free(vdev); + + for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { + if (!vdev->barmap[bar]) + continue; + pci_iounmap(pdev, vdev->barmap[bar]); + pci_release_selected_regions(pdev, 1 << bar); + vdev->barmap[bar] = NULL; + } + + list_for_each_entry_safe(dummy_res, tmp, + &vdev->dummy_resources_list, res_next) { + list_del(&dummy_res->res_next); + release_resource(&dummy_res->resource); + 
kfree(dummy_res); + } + + vdev->needs_reset = true; + + /* + * If we have saved state, restore it. If we can reset the device, + * even better. Resetting with current state seems better than + * nothing, but saving and restoring current state without reset + * is just busy work. + */ + if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) { + pr_info("%s: Couldn't reload %s saved state\n", + __func__, dev_name(&pdev->dev)); + + if (!vdev->reset_works) + goto out; + + pci_save_state(pdev); + } + + /* + * Disable INTx and MSI, presumably to avoid spurious interrupts + * during reset. Stolen from pci_reset_function() + */ + pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); + + /* + * Try to get the locks ourselves to prevent a deadlock. The + * success of this is dependent on being able to lock the device, + * which is not always possible. + * We can not use the "try" reset interface here, which will + * overwrite the previously restored configuration information. + */ + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) { + if (device_trylock(&pdev->dev)) { + if (!__pci_reset_function_locked(pdev)) + vdev->needs_reset = false; + device_unlock(&pdev->dev); + } + pci_cfg_access_unlock(pdev); + } + + pci_restore_state(pdev); +out: + pci_disable_device(pdev); + + vfio_pci_try_bus_reset(vdev); + + if (!disable_idle_d3) + pci_set_power_state(pdev, PCI_D3hot); +} + +static void vfio_pci_release(void *device_data) +{ + struct vfio_pci_device *vdev = device_data; + + mutex_lock(&driver_lock); + + if (!(--vdev->refcnt)) { + vfio_spapr_pci_eeh_release(vdev->pdev); + vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); + if (vdev->err_trigger) { + eventfd_ctx_put(vdev->err_trigger); + vdev->err_trigger = NULL; + } + mutex_unlock(&vdev->igate); + + mutex_lock(&vdev->igate); + if (vdev->req_trigger) { + eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } + mutex_unlock(&vdev->igate); + } + + mutex_unlock(&driver_lock); + + module_put(THIS_MODULE); +} + +static int vfio_pci_open(void *device_data) +{ + struct vfio_pci_device *vdev = device_data; + int ret = 0; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + mutex_lock(&driver_lock); + + if (!vdev->refcnt) { + ret = vfio_pci_enable(vdev); + if (ret) + goto error; + + vfio_spapr_pci_eeh_open(vdev->pdev); + } + vdev->refcnt++; +error: + mutex_unlock(&driver_lock); + if (ret) + module_put(THIS_MODULE); + return ret; +} + +static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) +{ + if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { + u8 pin; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || + vdev->nointx || vdev->pdev->is_virtfn) + return 0; + + pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); + + return pin ? 
1 : 0; + } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { + u8 pos; + u16 flags; + + pos = vdev->pdev->msi_cap; + if (pos) { + pci_read_config_word(vdev->pdev, + pos + PCI_MSI_FLAGS, &flags); + return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1); + } + } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) { + u8 pos; + u16 flags; + + pos = vdev->pdev->msix_cap; + if (pos) { + pci_read_config_word(vdev->pdev, + pos + PCI_MSIX_FLAGS, &flags); + + return (flags & PCI_MSIX_FLAGS_QSIZE) + 1; + } + } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) { + if (pci_is_pcie(vdev->pdev)) + return 1; + } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) { + return 1; + } + + return 0; +} + +static int vfio_pci_count_devs(struct pci_dev *pdev, void *data) +{ + (*(int *)data)++; + return 0; +} + +struct vfio_pci_fill_info { + int max; + int cur; + struct vfio_pci_dependent_device *devices; +}; + +static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data) +{ + struct vfio_pci_fill_info *fill = data; + struct iommu_group *iommu_group; + + if (fill->cur == fill->max) + return -EAGAIN; /* Something changed, try again */ + + iommu_group = iommu_group_get(&pdev->dev); + if (!iommu_group) + return -EPERM; /* Cannot reset non-isolated devices */ + + fill->devices[fill->cur].group_id = iommu_group_id(iommu_group); + fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus); + fill->devices[fill->cur].bus = pdev->bus->number; + fill->devices[fill->cur].devfn = pdev->devfn; + fill->cur++; + iommu_group_put(iommu_group); + return 0; +} + +struct vfio_pci_group_entry { + struct vfio_group *group; + int id; +}; + +struct vfio_pci_group_info { + int count; + struct vfio_pci_group_entry *groups; +}; + +static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data) +{ + struct vfio_pci_group_info *info = data; + struct iommu_group *group; + int id, i; + + group = iommu_group_get(&pdev->dev); + if (!group) + return -EPERM; + + id = iommu_group_id(group); + + for (i = 0; i < info->count; i++) + if (info->groups[i].id == id) + break; + + iommu_group_put(group); + + return (i == info->count) ? 
-EINVAL : 0; +} + +static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot) +{ + for (; pdev; pdev = pdev->bus->self) + if (pdev->bus == slot->bus) + return (pdev->slot == slot); + return false; +} + +struct vfio_pci_walk_info { + int (*fn)(struct pci_dev *, void *data); + void *data; + struct pci_dev *pdev; + bool slot; + int ret; +}; + +static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data) +{ + struct vfio_pci_walk_info *walk = data; + + if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot)) + walk->ret = walk->fn(pdev, walk->data); + + return walk->ret; +} + +static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev, + int (*fn)(struct pci_dev *, + void *data), void *data, + bool slot) +{ + struct vfio_pci_walk_info walk = { + .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0, + }; + + pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk); + + return walk.ret; +} + +static int msix_mmappable_cap(struct vfio_pci_device *vdev, + struct vfio_info_cap *caps) +{ + struct vfio_info_cap_header header = { + .id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE, + .version = 1 + }; + + return vfio_info_add_capability(caps, &header, sizeof(header)); +} + +int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data) +{ + struct vfio_pci_region *region; + + region = krealloc(vdev->region, + (vdev->num_regions + 1) * sizeof(*region), + GFP_KERNEL); + if (!region) + return -ENOMEM; + + vdev->region = region; + vdev->region[vdev->num_regions].type = type; + vdev->region[vdev->num_regions].subtype = subtype; + vdev->region[vdev->num_regions].ops = ops; + vdev->region[vdev->num_regions].size = size; + vdev->region[vdev->num_regions].flags = flags; + vdev->region[vdev->num_regions].data = data; + + vdev->num_regions++; + + return 0; +} + +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + +static long vfio_pci_ioctl(void *device_data, + unsigned int cmd, unsigned long arg) +{ + struct vfio_pci_device *vdev = device_data; + unsigned long minsz; + + if (cmd == VFIO_DEVICE_GET_INFO) { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + info.flags = VFIO_DEVICE_FLAGS_PCI; + + if (vdev->reset_works) + info.flags |= VFIO_DEVICE_FLAGS_RESET; + + info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions; + info.num_irqs = VFIO_PCI_NUM_IRQS; + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct pci_dev *pdev = vdev->pdev; + struct vfio_region_info info; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + int i, ret; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = pdev->cfg_size; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + break; + case VFIO_PCI_BAR0_REGION_INDEX ... 
VFIO_PCI_BAR5_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = pci_resource_len(pdev, info.index); + if (!info.size) { + info.flags = 0; + break; + } + + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + if (vdev->bar_mmap_supported[info.index]) { + info.flags |= VFIO_REGION_INFO_FLAG_MMAP; + if (info.index == vdev->msix_bar) { + ret = msix_mmappable_cap(vdev, &caps); + if (ret) + return ret; + } + } + + break; + case VFIO_PCI_ROM_REGION_INDEX: + { + void __iomem *io; + size_t size; + u16 cmd; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.flags = 0; + + /* Report the BAR size, not the ROM size */ + info.size = pci_resource_len(pdev, info.index); + if (!info.size) { + /* Shadow ROMs appear as PCI option ROMs */ + if (pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW) + info.size = 0x20000; + else + break; + } + + /* + * Is it really there? Enable memory decode for + * implicit access in pci_map_rom(). + */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + io = pci_map_rom(pdev, &size); + if (io) { + info.flags = VFIO_REGION_INFO_FLAG_READ; + pci_unmap_rom(pdev, io); + } else { + info.size = 0; + } + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + break; + } + case VFIO_PCI_VGA_REGION_INDEX: + if (!vdev->has_vga) + return -EINVAL; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = 0xc0000; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + + break; + default: + { + struct vfio_region_info_cap_type cap_type = { + .header.id = VFIO_REGION_INFO_CAP_TYPE, + .header.version = 1 }; + + if (info.index >= + VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; + info.index = array_index_nospec(info.index, + VFIO_PCI_NUM_REGIONS + + vdev->num_regions); + + i = info.index - VFIO_PCI_NUM_REGIONS; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = vdev->region[i].size; + info.flags = vdev->region[i].flags; + + cap_type.type = vdev->region[i].type; + cap_type.subtype = vdev->region[i].subtype; + + ret = vfio_info_add_capability(&caps, &cap_type.header, + sizeof(cap_type)); + if (ret) + return ret; + + } + } + + if (caps.size) { + info.flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + + kfree(caps.buf); + } + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX: + case VFIO_PCI_REQ_IRQ_INDEX: + break; + case VFIO_PCI_ERR_IRQ_INDEX: + if (pci_is_pcie(vdev->pdev)) + break; + /* fall through */ + default: + return -EINVAL; + } + + info.flags = VFIO_IRQ_INFO_EVENTFD; + + info.count = vfio_pci_get_irq_count(vdev, info.index); + + if (info.index == VFIO_PCI_INTX_IRQ_INDEX) + info.flags |= (VFIO_IRQ_INFO_MASKABLE | + VFIO_IRQ_INFO_AUTOMASKED); + else + info.flags |= VFIO_IRQ_INFO_NORESIZE; + + return copy_to_user((void __user *)arg, &info, minsz) ? 
+ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; + u8 *data = NULL; + int max, ret = 0; + size_t data_size = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + max = vfio_pci_get_irq_count(vdev, hdr.index); + + ret = vfio_set_irqs_validate_and_prepare(&hdr, max, + VFIO_PCI_NUM_IRQS, &data_size); + if (ret) + return ret; + + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), + data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + + mutex_lock(&vdev->igate); + + ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, + hdr.start, hdr.count, data); + + mutex_unlock(&vdev->igate); + kfree(data); + + return ret; + + } else if (cmd == VFIO_DEVICE_RESET) { + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; + + } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { + struct vfio_pci_hot_reset_info hdr; + struct vfio_pci_fill_info fill = { 0 }; + struct vfio_pci_dependent_device *devices = NULL; + bool slot = false; + int ret = 0; + + minsz = offsetofend(struct vfio_pci_hot_reset_info, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + if (hdr.argsz < minsz) + return -EINVAL; + + hdr.flags = 0; + + /* Can we do a slot or bus reset or neither? */ + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return -ENODEV; + + /* How many devices are affected? */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_count_devs, + &fill.max, slot); + if (ret) + return ret; + + WARN_ON(!fill.max); /* Should always be at least one */ + + /* + * If there's enough space, fill it now, otherwise return + * -ENOSPC and the number of devices affected. + */ + if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) { + ret = -ENOSPC; + hdr.count = fill.max; + goto reset_info_exit; + } + + devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL); + if (!devices) + return -ENOMEM; + + fill.devices = devices; + + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_fill_devs, + &fill, slot); + + /* + * If a device was removed between counting and filling, + * we may come up short of fill.max. If a device was + * added, we'll have a return of -EAGAIN above. + */ + if (!ret) + hdr.count = fill.cur; + +reset_info_exit: + if (copy_to_user((void __user *)arg, &hdr, minsz)) + ret = -EFAULT; + + if (!ret) { + if (copy_to_user((void __user *)(arg + minsz), devices, + hdr.count * sizeof(*devices))) + ret = -EFAULT; + } + + kfree(devices); + return ret; + + } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) { + struct vfio_pci_hot_reset hdr; + int32_t *group_fds; + struct vfio_pci_group_entry *groups; + struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; + bool slot = false; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; + + minsz = offsetofend(struct vfio_pci_hot_reset, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + if (hdr.argsz < minsz || hdr.flags) + return -EINVAL; + + /* Can we do a slot or bus reset or neither? 
*/ + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return -ENODEV; + + /* + * We can't let userspace give us an arbitrarily large + * buffer to copy, so verify how many we think there + * could be. Note groups can have multiple devices so + * one group per device is the max. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_count_devs, + &count, slot); + if (ret) + return ret; + + /* Somewhere between 1 and count is OK */ + if (!hdr.count || hdr.count > count) + return -EINVAL; + + group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL); + groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL); + if (!group_fds || !groups) { + kfree(group_fds); + kfree(groups); + return -ENOMEM; + } + + if (copy_from_user(group_fds, (void __user *)(arg + minsz), + hdr.count * sizeof(*group_fds))) { + kfree(group_fds); + kfree(groups); + return -EFAULT; + } + + /* + * For each group_fd, get the group through the vfio external + * user interface and store the group and iommu ID. This + * ensures the group is held across the reset. + */ + for (group_idx = 0; group_idx < hdr.count; group_idx++) { + struct vfio_group *group; + struct fd f = fdget(group_fds[group_idx]); + if (!f.file) { + ret = -EBADF; + break; + } + + group = vfio_group_get_external_user(f.file); + fdput(f); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + break; + } + + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); + } + + kfree(group_fds); + + /* release reference to groups on error */ + if (ret) + goto hot_reset_release; + + info.count = hdr.count; + info.groups = groups; + + /* + * Test whether all the affected devices are contained + * by the set of groups provided by the user. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_validate_devs, + &info, slot); + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. 
+ */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = pci_reset_bus(vdev->pdev); + +hot_reset_release: + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); + + kfree(groups); + return ret; + } else if (cmd == VFIO_DEVICE_IOEVENTFD) { + struct vfio_device_ioeventfd ioeventfd; + int count; + + minsz = offsetofend(struct vfio_device_ioeventfd, fd); + + if (copy_from_user(&ioeventfd, (void __user *)arg, minsz)) + return -EFAULT; + + if (ioeventfd.argsz < minsz) + return -EINVAL; + + if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK) + return -EINVAL; + + count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK; + + if (hweight8(count) != 1 || ioeventfd.fd < -1) + return -EINVAL; + + return vfio_pci_ioeventfd(vdev, ioeventfd.offset, + ioeventfd.data, count, ioeventfd.fd); + } + + return -ENOTTY; +} + +static ssize_t vfio_pci_rw(void *device_data, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct vfio_pci_device *vdev = device_data; + + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; + + switch (index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite); + + case VFIO_PCI_ROM_REGION_INDEX: + if (iswrite) + return -EINVAL; + return vfio_pci_bar_rw(vdev, buf, count, ppos, false); + + case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: + return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite); + + case VFIO_PCI_VGA_REGION_INDEX: + return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite); + default: + index -= VFIO_PCI_NUM_REGIONS; + return vdev->region[index].ops->rw(vdev, buf, + count, ppos, iswrite); + } + + return -EINVAL; +} + +static ssize_t vfio_pci_read(void *device_data, char __user *buf, + size_t count, loff_t *ppos) +{ + if (!count) + return 0; + + return vfio_pci_rw(device_data, buf, count, ppos, false); +} + +static ssize_t vfio_pci_write(void *device_data, const char __user *buf, + size_t count, loff_t *ppos) +{ + if (!count) + return 0; + + return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); +} + +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. + * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. 
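
The VFIO_DEVICE_IOEVENTFD branch above lets userspace register an eventfd so the kernel performs a specific BAR write (offset, data, size) whenever that eventfd is signalled, rather than the write trapping out to userspace each time. A minimal sketch, assuming 'offset' is a device-fd offset inside a BAR as reported by VFIO_DEVICE_GET_REGION_INFO:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/*
 * Illustrative sketch: have a 4-byte write of 'data' at 'offset'
 * (a device-fd offset within a BAR region) serviced via 'efd'.
 * Passing fd = -1 with the same offset/data/size tears it down.
 */
static int add_ioeventfd(int device_fd, uint64_t offset, uint64_t data, int efd)
{
	struct vfio_device_ioeventfd io = {
		.argsz	= sizeof(io),
		.flags	= VFIO_DEVICE_IOEVENTFD_32,	/* 4-byte access */
		.offset	= offset,
		.data	= data,
		.fd	= efd,
	};

	return ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &io);
}
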
+ * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. + * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. + */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + list_add(&mmap_vma->vma_next, &vdev->vma_list); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + vm_fault_t ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + goto up_out; + } + + /* + * We populate the whole vma on fault, so we need to test whether + * the vma has already been mapped, such as for concurrent faults + * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if + * we ask it to fill the same range again. + */ + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) + goto up_out; + } + + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) { + ret = VM_FAULT_SIGBUS; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + } + +up_out: + up_read(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); + return ret; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + +static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = device_data; + struct pci_dev *pdev = vdev->pdev; + unsigned int index; + u64 phys_len, req_len, pgoff, req_start; + int ret; + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + + if (vma->vm_end < vma->vm_start) + return -EINVAL; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + if (index >= VFIO_PCI_ROM_REGION_INDEX) + return -EINVAL; + if (!vdev->bar_mmap_supported[index]) + return -EINVAL; + + phys_len = PAGE_ALIGN(pci_resource_len(pdev, index)); + req_len = vma->vm_end - vma->vm_start; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (req_start + req_len > phys_len) + return -EINVAL; + + /* + * Even though we don't make use of the barmap for the mmap, + * we need to request the region and the barmap tracks that. + */ + if (!vdev->barmap[index]) { + ret = pci_request_selected_regions(pdev, + 1 << index, "vfio-pci"); + if (ret) + return ret; + + vdev->barmap[index] = pci_iomap(pdev, index, 0); + if (!vdev->barmap[index]) { + pci_release_selected_regions(pdev, 1 << index); + return -ENOMEM; + } + } + + vma->vm_private_data = vdev; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; + + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. 
+ */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; +} + +static void vfio_pci_request(void *device_data, unsigned int count) +{ + struct vfio_pci_device *vdev = device_data; + + mutex_lock(&vdev->igate); + + if (vdev->req_trigger) { + if (!(count % 10)) + dev_notice_ratelimited(&vdev->pdev->dev, + "Relaying device request to user (#%u)\n", + count); + eventfd_signal(vdev->req_trigger, 1); + } else if (count == 0) { + dev_warn(&vdev->pdev->dev, + "No device request channel registered, blocked until released by user\n"); + } + + mutex_unlock(&vdev->igate); +} + +static const struct vfio_device_ops vfio_pci_ops = { + .name = "vfio-pci", + .open = vfio_pci_open, + .release = vfio_pci_release, + .ioctl = vfio_pci_ioctl, + .read = vfio_pci_read, + .write = vfio_pci_write, + .mmap = vfio_pci_mmap, + .request = vfio_pci_request, +}; + +static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct vfio_pci_device *vdev; + struct iommu_group *group; + int ret; + + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) + return -EINVAL; + + /* + * Prevent binding to PFs with VFs enabled, this too easily allows + * userspace instance with VFs and PFs from the same device, which + * cannot work. Disabling SR-IOV here would initiate removing the + * VFs, which would unbind the driver, which is prone to blocking + * if that VF is also in use by vfio-pci. Just reject these PFs + * and let the user sort it out. + */ + if (pci_num_vf(pdev)) { + pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n"); + return -EBUSY; + } + + group = vfio_iommu_group_get(&pdev->dev); + if (!group) + return -EINVAL; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) { + vfio_iommu_group_put(group, &pdev->dev); + return -ENOMEM; + } + + vdev->pdev = pdev; + vdev->irq_type = VFIO_PCI_NUM_IRQS; + mutex_init(&vdev->igate); + spin_lock_init(&vdev->irqlock); + mutex_init(&vdev->ioeventfds_lock); + INIT_LIST_HEAD(&vdev->dummy_resources_list); + INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); + + ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); + if (ret) { + vfio_iommu_group_put(group, &pdev->dev); + kfree(vdev); + return ret; + } + + if (vfio_pci_is_vga(pdev)) { + vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode); + vga_set_legacy_decoding(pdev, + vfio_pci_set_vga_decode(vdev, false)); + } + + if (!disable_idle_d3) { + /* + * pci-core sets the device power state to an unknown value at + * bootup and after being removed from a driver. The only + * transition it allows from this unknown state is to D0, which + * typically happens when a driver calls pci_enable_device(). + * We're not ready to enable the device yet, but we do want to + * be able to get to D3. Therefore first do a D0 transition + * before going to D3. 
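
vfio_pci_mmap() above recovers the BAR index from the file offset, so userspace must mmap at the region offset reported by VFIO_DEVICE_GET_REGION_INFO rather than computing one by hand. A sketch for mapping BAR0 of an already-opened device fd (helper name illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

/*
 * Illustrative sketch: map BAR0.  The region's offset encodes the
 * BAR index (see VFIO_PCI_OFFSET_TO_INDEX above), so the mmap offset
 * must come from the region info, not be computed by hand.
 */
static void *map_bar0(int device_fd, size_t *len_out)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_BAR0_REGION_INDEX,
	};
	void *map;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
		return MAP_FAILED;

	if (!(info.flags & VFIO_REGION_INFO_FLAG_MMAP))
		return MAP_FAILED;	/* BAR not mmap-capable */

	map = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   device_fd, info.offset);
	if (map != MAP_FAILED && len_out)
		*len_out = info.size;
	return map;
}
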
+ */ + pci_set_power_state(pdev, PCI_D0); + pci_set_power_state(pdev, PCI_D3hot); + } + + return ret; +} + +static void vfio_pci_remove(struct pci_dev *pdev) +{ + struct vfio_pci_device *vdev; + + vdev = vfio_del_group_dev(&pdev->dev); + if (!vdev) + return; + + vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev); + kfree(vdev->region); + mutex_destroy(&vdev->ioeventfds_lock); + kfree(vdev); + + if (vfio_pci_is_vga(pdev)) { + vga_client_register(pdev, NULL, NULL, NULL); + vga_set_legacy_decoding(pdev, + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM | + VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM); + } + + if (!disable_idle_d3) + pci_set_power_state(pdev, PCI_D0); +} + +static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct vfio_pci_device *vdev; + struct vfio_device *device; + + device = vfio_device_get_from_dev(&pdev->dev); + if (device == NULL) + return PCI_ERS_RESULT_DISCONNECT; + + vdev = vfio_device_data(device); + if (vdev == NULL) { + vfio_device_put(device); + return PCI_ERS_RESULT_DISCONNECT; + } + + mutex_lock(&vdev->igate); + + if (vdev->err_trigger) + eventfd_signal(vdev->err_trigger, 1); + + mutex_unlock(&vdev->igate); + + vfio_device_put(device); + + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static const struct pci_error_handlers vfio_err_handlers = { + .error_detected = vfio_pci_aer_err_detected, +}; + +static struct pci_driver vfio_pci_driver = { + .name = "vfio-pci", + .id_table = NULL, /* only dynamic ids */ + .probe = vfio_pci_probe, + .remove = vfio_pci_remove, + .err_handler = &vfio_err_handlers, +}; + +static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. + */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + +/* + * Attempt to do a bus/slot reset if there are devices affected by a reset for + * this device that are needs_reset and all of the affected devices are unused + * (!refcnt). Callers are required to hold driver_lock when calling this to + * prevent device opens and concurrent bus reset attempts. We prevent device + * unbinds by acquiring and holding a reference to the vfio_device. + * + * NB: vfio-core considers a group to be viable even if some devices are + * bound to drivers like pci-stub or pcieport. Here we require all devices + * to be bound to vfio_pci since that's the only way we can be sure they + * stay put. 
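
Since vfio_pci_driver above carries no static id table, devices reach vfio_pci_probe() either through the ids= module parameter parsed below or by explicit binding from userspace. One common route is sysfs driver_override; a hedged sketch, assuming a kernel with driver_override support and eliding most error handling:

#include <stdio.h>

/*
 * Illustrative sketch only: bind one PCI device to vfio-pci using
 * driver_override.  'bdf' is e.g. "0000:01:00.0".
 */
static int bind_to_vfio_pci(const char *bdf)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/driver_override", bdf);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("vfio-pci", f);
	fclose(f);

	/* Unbind from the current driver, if any (ignore failure). */
	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/driver/unbind", bdf);
	f = fopen(path, "w");
	if (f) {
		fputs(bdf, f);
		fclose(f);
	}

	/* Re-probe; driver_override steers the device to vfio-pci. */
	f = fopen("/sys/bus/pci/drivers_probe", "w");
	if (!f)
		return -1;
	fputs(bdf, f);
	fclose(f);
	return 0;
}
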
+ */ +static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev) +{ + struct vfio_devices devs = { .cur_index = 0 }; + int i = 0, ret = -EINVAL; + bool needs_reset = false, slot = false; + struct vfio_pci_device *tmp; + + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return; + + if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs, + &i, slot) || !i) + return; + + devs.max_index = i; + devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL); + if (!devs.devices) + return; + + if (vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_get_devs, &devs, slot)) + goto put_devs; + + for (i = 0; i < devs.cur_index; i++) { + tmp = vfio_device_data(devs.devices[i]); + if (tmp->needs_reset) + needs_reset = true; + if (tmp->refcnt) + goto put_devs; + } + + if (needs_reset) + ret = pci_reset_bus(vdev->pdev); + +put_devs: + for (i = 0; i < devs.cur_index; i++) { + tmp = vfio_device_data(devs.devices[i]); + if (!ret) + tmp->needs_reset = false; + + if (!tmp->refcnt && !disable_idle_d3) + pci_set_power_state(tmp->pdev, PCI_D3hot); + + vfio_device_put(devs.devices[i]); + } + + kfree(devs.devices); +} + +static void __exit vfio_pci_cleanup(void) +{ + pci_unregister_driver(&vfio_pci_driver); + vfio_pci_uninit_perm_bits(); +} + +static void __init vfio_pci_fill_ids(void) +{ + char *p, *id; + int rc; + + /* no ids passed actually */ + if (ids[0] == '\0') + return; + + /* add ids specified in the module parameter */ + p = ids; + while ((id = strsep(&p, ","))) { + unsigned int vendor, device, subvendor = PCI_ANY_ID, + subdevice = PCI_ANY_ID, class = 0, class_mask = 0; + int fields; + + if (!strlen(id)) + continue; + + fields = sscanf(id, "%x:%x:%x:%x:%x:%x", + &vendor, &device, &subvendor, &subdevice, + &class, &class_mask); + + if (fields < 2) { + pr_warn("invalid id string \"%s\"\n", id); + continue; + } + + rc = pci_add_dynid(&vfio_pci_driver, vendor, device, + subvendor, subdevice, class, class_mask, 0); + if (rc) + pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n", + vendor, device, subvendor, subdevice, + class, class_mask, rc); + else + pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n", + vendor, device, subvendor, subdevice, + class, class_mask); + } +} + +static int __init vfio_pci_init(void) +{ + int ret; + + /* Allocate shared config space permision data used by all devices */ + ret = vfio_pci_init_perm_bits(); + if (ret) + return ret; + + /* Register and scan for devices */ + ret = pci_register_driver(&vfio_pci_driver); + if (ret) + goto out_driver; + + vfio_pci_fill_ids(); + + return 0; + +out_driver: + vfio_pci_uninit_perm_bits(); + return ret; +} + +module_init(vfio_pci_init); +module_exit(vfio_pci_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c new file mode 100644 index 000000000..86e917f1c --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -0,0 +1,1908 @@ +/* + * VFIO PCI config space virtualization + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. 
All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +/* + * This code handles reading and writing of PCI configuration registers. + * This is hairy because we want to allow a lot of flexibility to the + * user driver, but cannot trust it with all of the config fields. + * Tables determine which fields can be read and written, as well as + * which fields are 'virtualized' - special actions and translations to + * make it appear to the user that he has control, when in fact things + * must be negotiated with the underlying OS. + */ + +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/slab.h> + +#include "vfio_pci_private.h" + +/* Fake capability ID for standard config space */ +#define PCI_CAP_ID_BASIC 0 + +#define is_bar(offset) \ + ((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \ + (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4)) + +/* + * Lengths of PCI Config Capabilities + * 0: Removed from the user visible capability list + * FF: Variable length + */ +static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = { + [PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */ + [PCI_CAP_ID_PM] = PCI_PM_SIZEOF, + [PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF, + [PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF, + [PCI_CAP_ID_SLOTID] = 0, /* bridge - don't care */ + [PCI_CAP_ID_MSI] = 0xFF, /* 10, 14, 20, or 24 */ + [PCI_CAP_ID_CHSWP] = 0, /* cpci - not yet */ + [PCI_CAP_ID_PCIX] = 0xFF, /* 8 or 24 */ + [PCI_CAP_ID_HT] = 0xFF, /* hypertransport */ + [PCI_CAP_ID_VNDR] = 0xFF, /* variable */ + [PCI_CAP_ID_DBG] = 0, /* debug - don't care */ + [PCI_CAP_ID_CCRC] = 0, /* cpci - not yet */ + [PCI_CAP_ID_SHPC] = 0, /* hotswap - not yet */ + [PCI_CAP_ID_SSVID] = 0, /* bridge - don't care */ + [PCI_CAP_ID_AGP3] = 0, /* AGP8x - not yet */ + [PCI_CAP_ID_SECDEV] = 0, /* secure device not yet */ + [PCI_CAP_ID_EXP] = 0xFF, /* 20 or 44 */ + [PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF, + [PCI_CAP_ID_SATA] = 0xFF, + [PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF, +}; + +/* + * Lengths of PCIe/PCI-X Extended Config Capabilities + * 0: Removed or masked from the user visible capability list + * FF: Variable length + */ +static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = { + [PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND, + [PCI_EXT_CAP_ID_VC] = 0xFF, + [PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF, + [PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF, + [PCI_EXT_CAP_ID_RCLD] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_RCILC] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_RCEC] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_MFVC] = 0xFF, + [PCI_EXT_CAP_ID_VC9] = 0xFF, /* same as CAP_ID_VC */ + [PCI_EXT_CAP_ID_RCRB] = 0, /* root only - don't care */ + [PCI_EXT_CAP_ID_VNDR] = 0xFF, + [PCI_EXT_CAP_ID_CAC] = 0, /* obsolete */ + [PCI_EXT_CAP_ID_ACS] = 0xFF, + [PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF, + [PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF, + [PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF, + [PCI_EXT_CAP_ID_MRIOV] = 0, /* not yet */ + [PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF, + [PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF, + [PCI_EXT_CAP_ID_AMD_XXX] = 0, /* not yet */ + [PCI_EXT_CAP_ID_REBAR] = 0xFF, + [PCI_EXT_CAP_ID_DPA] = 0xFF, + [PCI_EXT_CAP_ID_TPH] = 0xFF, + [PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF, + [PCI_EXT_CAP_ID_SECPCI] = 0, /* not yet */ + [PCI_EXT_CAP_ID_PMUX] = 0, /* not yet */ + [PCI_EXT_CAP_ID_PASID] = 0, /* not yet */ +}; + +/* + * Read/Write Permission Bits - one 
bit for each bit in capability + * Any field can be read if it exists, but what is read depends on + * whether the field is 'virtualized', or just pass thru to the + * hardware. Any virtualized field is also virtualized for writes. + * Writes are only permitted if they have a 1 bit here. + */ +struct perm_bits { + u8 *virt; /* read/write virtual data, not hw */ + u8 *write; /* writeable bits */ + int (*readfn)(struct vfio_pci_device *vdev, int pos, int count, + struct perm_bits *perm, int offset, __le32 *val); + int (*writefn)(struct vfio_pci_device *vdev, int pos, int count, + struct perm_bits *perm, int offset, __le32 val); +}; + +#define NO_VIRT 0 +#define ALL_VIRT 0xFFFFFFFFU +#define NO_WRITE 0 +#define ALL_WRITE 0xFFFFFFFFU + +static int vfio_user_config_read(struct pci_dev *pdev, int offset, + __le32 *val, int count) +{ + int ret = -EINVAL; + u32 tmp_val = 0; + + switch (count) { + case 1: + { + u8 tmp; + ret = pci_user_read_config_byte(pdev, offset, &tmp); + tmp_val = tmp; + break; + } + case 2: + { + u16 tmp; + ret = pci_user_read_config_word(pdev, offset, &tmp); + tmp_val = tmp; + break; + } + case 4: + ret = pci_user_read_config_dword(pdev, offset, &tmp_val); + break; + } + + *val = cpu_to_le32(tmp_val); + + return ret; +} + +static int vfio_user_config_write(struct pci_dev *pdev, int offset, + __le32 val, int count) +{ + int ret = -EINVAL; + u32 tmp_val = le32_to_cpu(val); + + switch (count) { + case 1: + ret = pci_user_write_config_byte(pdev, offset, tmp_val); + break; + case 2: + ret = pci_user_write_config_word(pdev, offset, tmp_val); + break; + case 4: + ret = pci_user_write_config_dword(pdev, offset, tmp_val); + break; + } + + return ret; +} + +static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + __le32 virt = 0; + + memcpy(val, vdev->vconfig + pos, count); + + memcpy(&virt, perm->virt + offset, count); + + /* Any non-virtualized bits? 
*/ + if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) { + struct pci_dev *pdev = vdev->pdev; + __le32 phys_val = 0; + int ret; + + ret = vfio_user_config_read(pdev, pos, &phys_val, count); + if (ret) + return ret; + + *val = (phys_val & ~virt) | (*val & virt); + } + + return count; +} + +static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + __le32 virt = 0, write = 0; + + memcpy(&write, perm->write + offset, count); + + if (!write) + return count; /* drop, no writable bits */ + + memcpy(&virt, perm->virt + offset, count); + + /* Virtualized and writable bits go to vconfig */ + if (write & virt) { + __le32 virt_val = 0; + + memcpy(&virt_val, vdev->vconfig + pos, count); + + virt_val &= ~(write & virt); + virt_val |= (val & (write & virt)); + + memcpy(vdev->vconfig + pos, &virt_val, count); + } + + /* Non-virtualzed and writable bits go to hardware */ + if (write & ~virt) { + struct pci_dev *pdev = vdev->pdev; + __le32 phys_val = 0; + int ret; + + ret = vfio_user_config_read(pdev, pos, &phys_val, count); + if (ret) + return ret; + + phys_val &= ~(write & ~virt); + phys_val |= (val & (write & ~virt)); + + ret = vfio_user_config_write(pdev, pos, phys_val, count); + if (ret) + return ret; + } + + return count; +} + +/* Allow direct read from hardware, except for capability next pointer */ +static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + int ret; + + ret = vfio_user_config_read(vdev->pdev, pos, val, count); + if (ret) + return ret; + + if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */ + if (offset < 4) + memcpy(val, vdev->vconfig + pos, count); + } else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */ + if (offset == PCI_CAP_LIST_ID && count > 1) + memcpy(val, vdev->vconfig + pos, + min(PCI_CAP_FLAGS, count)); + else if (offset == PCI_CAP_LIST_NEXT) + memcpy(val, vdev->vconfig + pos, 1); + } + + return count; +} + +/* Raw access skips any kind of virtualization */ +static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + int ret; + + ret = vfio_user_config_write(vdev->pdev, pos, val, count); + if (ret) + return ret; + + return count; +} + +static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + int ret; + + ret = vfio_user_config_read(vdev->pdev, pos, val, count); + if (ret) + return ret; + + return count; +} + +/* Virt access uses only virtualization */ +static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + memcpy(vdev->vconfig + pos, &val, count); + return count; +} + +static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + memcpy(val, vdev->vconfig + pos, count); + return count; +} + +/* Default capability regions to read-only, no-virtualization */ +static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = { + [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } +}; +static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = { + [0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } +}; +/* + * Default unassigned regions to raw read-write access. 
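
The default read and write handlers above split every access with two permission bitmaps: virt selects whether a bit is served from vconfig or from the device, and write selects whether a bit may be changed at all. A small standalone restatement of that merge arithmetic (the mask and register values below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical per-dword permission masks, as set by p_setd(). */
	uint32_t virt  = 0x0000ffff;	/* low half served from vconfig  */
	uint32_t write = 0x00ff00ff;	/* only these bits accept writes */

	uint32_t vconfig = 0x00001234;	/* emulated copy                 */
	uint32_t hw      = 0xabcd0000;	/* what the device would return  */
	uint32_t user    = 0xffffffff;	/* value the user tries to write */

	/* Read path: virtualized bits from vconfig, the rest from hw. */
	uint32_t rd = (hw & ~virt) | (vconfig & virt);

	/* Write path: writable+virtualized bits update vconfig ... */
	vconfig = (vconfig & ~(write & virt)) | (user & (write & virt));
	/* ... writable non-virtualized bits go to hw, the rest are dropped. */
	hw = (hw & ~(write & ~virt)) | (user & (write & ~virt));

	printf("read=%08x vconfig=%08x hw=%08x\n", rd, vconfig, hw);
	return 0;
}
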
Some devices + * require this to function as they hide registers between the gaps in + * config space (be2net). Like MMIO and I/O port registers, we have + * to trust the hardware isolation. + */ +static struct perm_bits unassigned_perms = { + .readfn = vfio_raw_config_read, + .writefn = vfio_raw_config_write +}; + +static struct perm_bits virt_perms = { + .readfn = vfio_virt_config_read, + .writefn = vfio_virt_config_write +}; + +static void free_perm_bits(struct perm_bits *perm) +{ + kfree(perm->virt); + kfree(perm->write); + perm->virt = NULL; + perm->write = NULL; +} + +static int alloc_perm_bits(struct perm_bits *perm, int size) +{ + /* + * Round up all permission bits to the next dword, this lets us + * ignore whether a read/write exceeds the defined capability + * structure. We can do this because: + * - Standard config space is already dword aligned + * - Capabilities are all dword aligned (bits 0:1 of next reserved) + * - Express capabilities defined as dword aligned + */ + size = round_up(size, 4); + + /* + * Zero state is + * - All Readable, None Writeable, None Virtualized + */ + perm->virt = kzalloc(size, GFP_KERNEL); + perm->write = kzalloc(size, GFP_KERNEL); + if (!perm->virt || !perm->write) { + free_perm_bits(perm); + return -ENOMEM; + } + + perm->readfn = vfio_default_config_read; + perm->writefn = vfio_default_config_write; + + return 0; +} + +/* + * Helper functions for filling in permission tables + */ +static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write) +{ + p->virt[off] = virt; + p->write[off] = write; +} + +/* Handle endian-ness - pci and tables are little-endian */ +static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write) +{ + *(__le16 *)(&p->virt[off]) = cpu_to_le16(virt); + *(__le16 *)(&p->write[off]) = cpu_to_le16(write); +} + +/* Handle endian-ness - pci and tables are little-endian */ +static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) +{ + *(__le32 *)(&p->virt[off]) = cpu_to_le32(virt); + *(__le32 *)(&p->write[off]) = cpu_to_le32(write); +} + +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); +} + +/* + * Restore the *real* BARs after we detect a FLR or backdoor reset. 
+ * (backdoor = some device specific technique that we didn't catch) + */ +static void vfio_bar_restore(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u32 *rbar = vdev->rbar; + u16 cmd; + int i; + + if (pdev->is_virtfn) + return; + + pr_info("%s: %s reset recovery - restoring bars\n", + __func__, dev_name(&pdev->dev)); + + for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++) + pci_user_write_config_dword(pdev, i, *rbar); + + pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar); + + if (vdev->nointx) { + pci_user_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_INTX_DISABLE; + pci_user_write_config_word(pdev, PCI_COMMAND, cmd); + } +} + +static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar) +{ + unsigned long flags = pci_resource_flags(pdev, bar); + u32 val; + + if (flags & IORESOURCE_IO) + return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO); + + val = PCI_BASE_ADDRESS_SPACE_MEMORY; + + if (flags & IORESOURCE_PREFETCH) + val |= PCI_BASE_ADDRESS_MEM_PREFETCH; + + if (flags & IORESOURCE_MEM_64) + val |= PCI_BASE_ADDRESS_MEM_TYPE_64; + + return cpu_to_le32(val); +} + +/* + * Pretend we're hardware and tweak the values of the *virtual* PCI BARs + * to reflect the hardware capabilities. This implements BAR sizing. + */ +static void vfio_bar_fixup(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + int i; + __le32 *bar; + u64 mask; + + bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0]; + + for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) { + if (!pci_resource_start(pdev, i)) { + *bar = 0; /* Unmapped by host = unimplemented to user */ + continue; + } + + mask = ~(pci_resource_len(pdev, i) - 1); + + *bar &= cpu_to_le32((u32)mask); + *bar |= vfio_generate_bar_flags(pdev, i); + + if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) { + bar++; + *bar &= cpu_to_le32((u32)(mask >> 32)); + i++; + } + } + + bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS]; + + /* + * NB. REGION_INFO will have reported zero size if we weren't able + * to read the ROM, but we still return the actual BAR size here if + * it exists (or the shadow ROM space). 
+ */ + if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) { + mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1); + mask |= PCI_ROM_ADDRESS_ENABLE; + *bar &= cpu_to_le32((u32)mask); + } else if (pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW) { + mask = ~(0x20000 - 1); + mask |= PCI_ROM_ADDRESS_ENABLE; + *bar &= cpu_to_le32((u32)mask); + } else + *bar = 0; + + vdev->bardirty = false; +} + +static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + if (is_bar(offset)) /* pos == offset for basic config */ + vfio_bar_fixup(vdev); + + count = vfio_default_config_read(vdev, pos, count, perm, offset, val); + + /* Mask in virtual memory enable for SR-IOV devices */ + if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) { + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + u32 tmp_val = le32_to_cpu(*val); + + tmp_val |= cmd & PCI_COMMAND_MEMORY; + *val = cpu_to_le32(tmp_val); + } + + return count; +} + +/* Test whether BARs match the value we think they should contain */ +static bool vfio_need_bar_restore(struct vfio_pci_device *vdev) +{ + int i = 0, pos = PCI_BASE_ADDRESS_0, ret; + u32 bar; + + for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) { + if (vdev->rbar[i]) { + ret = pci_user_read_config_dword(vdev->pdev, pos, &bar); + if (ret || vdev->rbar[i] != bar) + return true; + } + } + + return false; +} + +static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + struct pci_dev *pdev = vdev->pdev; + __le16 *virt_cmd; + u16 new_cmd = 0; + int ret; + + virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND]; + + if (offset == PCI_COMMAND) { + bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io; + u16 phys_cmd; + + ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd); + if (ret) + return ret; + + new_cmd = le32_to_cpu(val); + + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); + virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); + new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); + + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); + + /* + * If the user is writing mem/io enable (new_mem/io) and we + * think it's already enabled (virt_mem/io), but the hardware + * shows it disabled (phys_mem/io, then the device has + * undergone some kind of backdoor reset and needs to be + * restored before we allow it to enable the bars. + * SR-IOV devices will trigger this, but we catch them later + */ + if ((new_mem && virt_mem && !phys_mem) || + (new_io && virt_io && !phys_io) || + vfio_need_bar_restore(vdev)) + vfio_bar_restore(vdev); + } + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) { + if (offset == PCI_COMMAND) + up_write(&vdev->memory_lock); + return count; + } + + /* + * Save current memory/io enable bits in vconfig to allow for + * the test above next time. 
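
vfio_bar_fixup() above is what makes the classic BAR sizing probe work against the virtual BARs: the user writes all-ones and reads back a value masked by ~(resource length - 1) plus the flag bits. A hedged userspace sketch of that probe through the vfio config region, assuming a 32-bit memory BAR, with the original value restored and error handling elided:

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <linux/pci_regs.h>

/*
 * Illustrative sketch: size BAR0 with the usual write-ones/read-back
 * probe, performed against the emulated config space.  64-bit and
 * I/O BARs need the standard extra handling not shown here.
 */
static uint64_t size_bar0(int device_fd)
{
	struct vfio_region_info cfg = {
		.argsz = sizeof(cfg),
		.index = VFIO_PCI_CONFIG_REGION_INDEX,
	};
	uint32_t orig, probe = 0xffffffff, val;
	off_t bar;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &cfg))
		return 0;
	bar = cfg.offset + PCI_BASE_ADDRESS_0;

	pread(device_fd, &orig, 4, bar);
	pwrite(device_fd, &probe, 4, bar);
	pread(device_fd, &val, 4, bar);
	pwrite(device_fd, &orig, 4, bar);	/* restore */

	val &= PCI_BASE_ADDRESS_MEM_MASK;	/* strip flag bits */
	return val ? (~(uint64_t)val + 1) & 0xffffffff : 0;
}
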
+ */ + if (offset == PCI_COMMAND) { + u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO; + + *virt_cmd &= cpu_to_le16(~mask); + *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); + } + + /* Emulate INTx disable */ + if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) { + bool virt_intx_disable; + + virt_intx_disable = !!(le16_to_cpu(*virt_cmd) & + PCI_COMMAND_INTX_DISABLE); + + if (virt_intx_disable && !vdev->virq_disabled) { + vdev->virq_disabled = true; + vfio_pci_intx_mask(vdev); + } else if (!virt_intx_disable && vdev->virq_disabled) { + vdev->virq_disabled = false; + vfio_pci_intx_unmask(vdev); + } + } + + if (is_bar(offset)) + vdev->bardirty = true; + + return count; +} + +/* Permissions for the Basic PCI Header */ +static int __init init_pci_cap_basic_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF)) + return -ENOMEM; + + perm->readfn = vfio_basic_config_read; + perm->writefn = vfio_basic_config_write; + + /* Virtualized for SR-IOV functions, which just have FFFF */ + p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE); + p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE); + + /* + * Virtualize INTx disable, we use it internally for interrupt + * control and can emulate it for non-PCI 2.3 devices. + */ + p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE); + + /* Virtualize capability list, we might want to skip/disable */ + p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE); + + /* No harm to write */ + p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE); + p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE); + p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE); + + /* Virtualize all bars, can't touch the real ones */ + p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE); + p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE); + + /* Allow us to adjust capability chain */ + p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE); + + /* Sometimes used by sw, just virtualize */ + p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE); + + /* Virtualize interrupt pin to allow hiding INTx */ + p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE); + + return 0; +} + +static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + if (offset == PCI_PM_CTRL) { + pci_power_t state; + + switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) { + case 0: + state = PCI_D0; + break; + case 1: + state = PCI_D1; + break; + case 2: + state = PCI_D2; + break; + case 3: + state = PCI_D3hot; + break; + } + + pci_set_power_state(vdev->pdev, state); + } + + return count; +} + +/* Permissions for the Power Management capability */ +static int __init init_pci_cap_pm_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM])) + return -ENOMEM; + + perm->writefn = vfio_pm_config_write; + + /* + * We always virtualize the next field so we can remove + * capabilities from the chain if we want to. 
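
Because the capability list pointer and each capability's next field are virtualized above, a userspace walk of the chain sees exactly the filtered list that vfio_cap_init() builds further below. A minimal sketch of such a walk through the config region ('cfg_off' is the config region offset from VFIO_DEVICE_GET_REGION_INFO, as in the sizing sketch above):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/pci_regs.h>

/*
 * Illustrative sketch: print the standard capability chain as exposed
 * through the emulated config space.
 */
static void dump_caps(int device_fd, off_t cfg_off)
{
	uint16_t status = 0;
	uint8_t pos = 0, id, next;
	int loops = 48;		/* bounded, as in vfio_cap_init() below */

	pread(device_fd, &status, 2, cfg_off + PCI_STATUS);
	if (!(status & PCI_STATUS_CAP_LIST))
		return;

	pread(device_fd, &pos, 1, cfg_off + PCI_CAPABILITY_LIST);
	while (pos && loops--) {
		pread(device_fd, &id, 1, cfg_off + pos + PCI_CAP_LIST_ID);
		pread(device_fd, &next, 1, cfg_off + pos + PCI_CAP_LIST_NEXT);
		printf("cap 0x%02x at 0x%02x\n", id, pos);
		pos = next;
	}
}
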
+ */ + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * Power management is defined *per function*, so we can let + * the user change power state, but we trap and initiate the + * change ourselves, so the state bits are read-only. + */ + p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK); + return 0; +} + +static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + struct pci_dev *pdev = vdev->pdev; + __le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR); + __le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA); + u16 addr; + u32 data; + + /* + * Write through to emulation. If the write includes the upper byte + * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we + * have work to do. + */ + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0 || offset > PCI_VPD_ADDR + 1 || + offset + count <= PCI_VPD_ADDR + 1) + return count; + + addr = le16_to_cpu(*paddr); + + if (addr & PCI_VPD_ADDR_F) { + data = le32_to_cpu(*pdata); + if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) + return count; + } else { + data = 0; + if (pci_read_vpd(pdev, addr, 4, &data) < 0) + return count; + *pdata = cpu_to_le32(data); + } + + /* + * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to + * signal completion. If an error occurs above, we assume that not + * toggling this bit will induce a driver timeout. + */ + addr ^= PCI_VPD_ADDR_F; + *paddr = cpu_to_le16(addr); + + return count; +} + +/* Permissions for Vital Product Data capability */ +static int __init init_pci_cap_vpd_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD])) + return -ENOMEM; + + perm->writefn = vfio_vpd_config_write; + + /* + * We always virtualize the next field so we can remove + * capabilities from the chain if we want to. + */ + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * Both the address and data registers are virtualized to + * enable access through the pci_vpd_read/write functions + */ + p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE); + p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE); + + return 0; +} + +/* Permissions for PCI-X capability */ +static int __init init_pci_cap_pcix_perm(struct perm_bits *perm) +{ + /* Alloc 24, but only 8 are used in v0 */ + if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2)) + return -ENOMEM; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE); + p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE); + return 0; +} + +static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + __le16 *ctrl = (__le16 *)(vdev->vconfig + pos - + offset + PCI_EXP_DEVCTL); + int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ; + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + /* + * The FLR bit is virtualized, if set and the device supports PCIe + * FLR, issue a reset_function. Regardless, clear the bit, the spec + * requires it to be always read as zero. NB, reset_function might + * not use a PCIe FLR, we don't have that level of granularity. 
+ */ + if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) { + u32 cap; + int ret; + + *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR); + + ret = pci_user_read_config_dword(vdev->pdev, + pos - offset + PCI_EXP_DEVCAP, + &cap); + + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); + pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } + } + + /* + * MPS is virtualized to the user, writes do not change the physical + * register since determining a proper MPS value requires a system wide + * device view. The MRRS is largely independent of MPS, but since the + * user does not have that system-wide view, they might set a safe, but + * inefficiently low value. Here we allow writes through to hardware, + * but we set the floor to the physical device MPS setting, so that + * we can at least use full TLPs, as defined by the MPS value. + * + * NB, if any devices actually depend on an artificially low MRRS + * setting, this will need to be revisited, perhaps with a quirk + * though pcie_set_readrq(). + */ + if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) { + readrq = 128 << + ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12); + readrq = max(readrq, pcie_get_mps(vdev->pdev)); + + pcie_set_readrq(vdev->pdev, readrq); + } + + return count; +} + +/* Permissions for PCI Express capability */ +static int __init init_pci_cap_exp_perm(struct perm_bits *perm) +{ + /* Alloc largest of possible sizes */ + if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2)) + return -ENOMEM; + + perm->writefn = vfio_exp_config_write; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * Allow writes to device control fields, except devctl_phantom, + * which could confuse IOMMU, MPS, which can break communication + * with other physical devices, and the ARI bit in devctl2, which + * is set at probe time. FLR and MRRS get virtualized via our + * writefn. + */ + p_setw(perm, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD | + PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM); + p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); + return 0; +} + +static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL; + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + /* + * The FLR bit is virtualized, if set and the device supports AF + * FLR, issue a reset_function. Regardless, clear the bit, the spec + * requires it to be always read as zero. NB, reset_function might + * not use an AF FLR, we don't have that level of granularity. 
+ */ + if (*ctrl & PCI_AF_CTRL_FLR) { + u8 cap; + int ret; + + *ctrl &= ~PCI_AF_CTRL_FLR; + + ret = pci_user_read_config_byte(vdev->pdev, + pos - offset + PCI_AF_CAP, + &cap); + + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); + pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } + } + + return count; +} + +/* Permissions for Advanced Function capability */ +static int __init init_pci_cap_af_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF])) + return -ENOMEM; + + perm->writefn = vfio_af_config_write; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR); + return 0; +} + +/* Permissions for Advanced Error Reporting extended capability */ +static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm) +{ + u32 mask; + + if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR])) + return -ENOMEM; + + /* + * Virtualize the first dword of all express capabilities + * because it includes the next pointer. This lets us later + * remove capabilities from the chain if we need to. + */ + p_setd(perm, 0, ALL_VIRT, NO_WRITE); + + /* Writable bits mask */ + mask = PCI_ERR_UNC_UND | /* Undefined */ + PCI_ERR_UNC_DLP | /* Data Link Protocol */ + PCI_ERR_UNC_SURPDN | /* Surprise Down */ + PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */ + PCI_ERR_UNC_FCP | /* Flow Control Protocol */ + PCI_ERR_UNC_COMP_TIME | /* Completion Timeout */ + PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */ + PCI_ERR_UNC_UNX_COMP | /* Unexpected Completion */ + PCI_ERR_UNC_RX_OVER | /* Receiver Overflow */ + PCI_ERR_UNC_MALF_TLP | /* Malformed TLP */ + PCI_ERR_UNC_ECRC | /* ECRC Error Status */ + PCI_ERR_UNC_UNSUP | /* Unsupported Request */ + PCI_ERR_UNC_ACSV | /* ACS Violation */ + PCI_ERR_UNC_INTN | /* internal error */ + PCI_ERR_UNC_MCBTLP | /* MC blocked TLP */ + PCI_ERR_UNC_ATOMEG | /* Atomic egress blocked */ + PCI_ERR_UNC_TLPPRE; /* TLP prefix blocked */ + p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask); + p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask); + p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask); + + mask = PCI_ERR_COR_RCVR | /* Receiver Error Status */ + PCI_ERR_COR_BAD_TLP | /* Bad TLP Status */ + PCI_ERR_COR_BAD_DLLP | /* Bad DLLP Status */ + PCI_ERR_COR_REP_ROLL | /* REPLAY_NUM Rollover */ + PCI_ERR_COR_REP_TIMER | /* Replay Timer Timeout */ + PCI_ERR_COR_ADV_NFAT | /* Advisory Non-Fatal */ + PCI_ERR_COR_INTERNAL | /* Corrected Internal */ + PCI_ERR_COR_LOG_OVER; /* Header Log Overflow */ + p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask); + p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask); + + mask = PCI_ERR_CAP_ECRC_GENE | /* ECRC Generation Enable */ + PCI_ERR_CAP_ECRC_CHKE; /* ECRC Check Enable */ + p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask); + return 0; +} + +/* Permissions for Power Budgeting extended capability */ +static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm) +{ + if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR])) + return -ENOMEM; + + p_setd(perm, 0, ALL_VIRT, NO_WRITE); + + /* Writing the data selector is OK, the info is still read-only */ + p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE); + return 0; +} + +/* + * Initialize the shared permission tables + */ +void vfio_pci_uninit_perm_bits(void) +{ + free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]); + + free_perm_bits(&cap_perms[PCI_CAP_ID_PM]); + free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]); + 
free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]); + free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]); + free_perm_bits(&cap_perms[PCI_CAP_ID_AF]); + + free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]); + free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]); +} + +int __init vfio_pci_init_perm_bits(void) +{ + int ret; + + /* Basic config space */ + ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]); + + /* Capabilities */ + ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]); + ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]); + ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]); + cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write; + ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]); + ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]); + + /* Extended capabilities */ + ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]); + ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]); + ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write; + + if (ret) + vfio_pci_uninit_perm_bits(); + + return ret; +} + +static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos) +{ + u8 cap; + int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE : + PCI_STD_HEADER_SIZEOF; + cap = vdev->pci_config_map[pos]; + + if (cap == PCI_CAP_ID_BASIC) + return 0; + + /* XXX Can we have to abutting capabilities of the same type? */ + while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap) + pos--; + + return pos; +} + +static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 *val) +{ + /* Update max available queue size from msi_qmax */ + if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) { + __le16 *flags; + int start; + + start = vfio_find_cap_start(vdev, pos); + + flags = (__le16 *)&vdev->vconfig[start]; + + *flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK); + *flags |= cpu_to_le16(vdev->msi_qmax << 1); + } + + return vfio_default_config_read(vdev, pos, count, perm, offset, val); +} + +static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos, + int count, struct perm_bits *perm, + int offset, __le32 val) +{ + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) + return count; + + /* Fixup and write configured queue size and enable to hardware */ + if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) { + __le16 *pflags; + u16 flags; + int start, ret; + + start = vfio_find_cap_start(vdev, pos); + + pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS]; + + flags = le16_to_cpu(*pflags); + + /* MSI is enabled via ioctl */ + if (!is_msi(vdev)) + flags &= ~PCI_MSI_FLAGS_ENABLE; + + /* Check queue size */ + if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) { + flags &= ~PCI_MSI_FLAGS_QSIZE; + flags |= vdev->msi_qmax << 4; + } + + /* Write back to virt and to hardware */ + *pflags = cpu_to_le16(flags); + ret = pci_user_write_config_word(vdev->pdev, + start + PCI_MSI_FLAGS, + flags); + if (ret) + return ret; + } + + return count; +} + +/* + * MSI determination is per-device, so this routine gets used beyond + * initialization time. 
Don't add __init + */ +static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags) +{ + if (alloc_perm_bits(perm, len)) + return -ENOMEM; + + perm->readfn = vfio_msi_config_read; + perm->writefn = vfio_msi_config_write; + + p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); + + /* + * The upper byte of the control register is reserved, + * just setup the lower byte. + */ + p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE); + p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE); + if (flags & PCI_MSI_FLAGS_64BIT) { + p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE); + p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE); + if (flags & PCI_MSI_FLAGS_MASKBIT) { + p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE); + p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE); + } + } else { + p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE); + if (flags & PCI_MSI_FLAGS_MASKBIT) { + p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE); + p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE); + } + } + return 0; +} + +/* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */ +static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos) +{ + struct pci_dev *pdev = vdev->pdev; + int len, ret; + u16 flags; + + ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags); + if (ret) + return pcibios_err_to_errno(ret); + + len = 10; /* Minimum size */ + if (flags & PCI_MSI_FLAGS_64BIT) + len += 4; + if (flags & PCI_MSI_FLAGS_MASKBIT) + len += 10; + + if (vdev->msi_perm) + return len; + + vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL); + if (!vdev->msi_perm) + return -ENOMEM; + + ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags); + if (ret) { + kfree(vdev->msi_perm); + return ret; + } + + return len; +} + +/* Determine extended capability length for VC (2 & 9) and MFVC */ +static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos) +{ + struct pci_dev *pdev = vdev->pdev; + u32 tmp; + int ret, evcc, phases, vc_arb; + int len = PCI_CAP_VC_BASE_SIZEOF; + + ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp); + if (ret) + return pcibios_err_to_errno(ret); + + evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */ + ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp); + if (ret) + return pcibios_err_to_errno(ret); + + if (tmp & PCI_VC_CAP2_128_PHASE) + phases = 128; + else if (tmp & PCI_VC_CAP2_64_PHASE) + phases = 64; + else if (tmp & PCI_VC_CAP2_32_PHASE) + phases = 32; + else + phases = 0; + + vc_arb = phases * 4; + + /* + * Port arbitration tables are root & switch only; + * function arbitration tables are function 0 only. 
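
vfio_msi_cap_len() above derives the capability size purely from the MSI flags word; for example, a function advertising 64-bit addressing and per-vector masking ends up with 10 + 4 + 10 = 24 bytes. A trivial standalone restatement of that sizing rule:

#include <linux/pci_regs.h>

/*
 * Mirror of the rule above: 10-byte base, +4 for 64-bit addressing,
 * +10 for the mask/pending registers.
 */
static int msi_cap_len(unsigned int flags)
{
	int len = 10;

	if (flags & PCI_MSI_FLAGS_64BIT)
		len += 4;
	if (flags & PCI_MSI_FLAGS_MASKBIT)
		len += 10;
	return len;
}
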
+ * In either case, we'll never let user write them so + * we don't care how big they are + */ + len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF; + if (vc_arb) { + len = round_up(len, 16); + len += vc_arb / 8; + } + return len; +} + +static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos) +{ + struct pci_dev *pdev = vdev->pdev; + u32 dword; + u16 word; + u8 byte; + int ret; + + switch (cap) { + case PCI_CAP_ID_MSI: + return vfio_msi_cap_len(vdev, pos); + case PCI_CAP_ID_PCIX: + ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word); + if (ret) + return pcibios_err_to_errno(ret); + + if (PCI_X_CMD_VERSION(word)) { + if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) { + /* Test for extended capabilities */ + pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, + &dword); + vdev->extended_caps = (dword != 0); + } + return PCI_CAP_PCIX_SIZEOF_V2; + } else + return PCI_CAP_PCIX_SIZEOF_V0; + case PCI_CAP_ID_VNDR: + /* length follows next field */ + ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + return byte; + case PCI_CAP_ID_EXP: + if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) { + /* Test for extended capabilities */ + pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword); + vdev->extended_caps = (dword != 0); + } + + /* length based on version and type */ + if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) { + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END) + return 0xc; /* "All Devices" only, no link */ + return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1; + } else { + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END) + return 0x2c; /* No link */ + return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2; + } + case PCI_CAP_ID_HT: + ret = pci_read_config_byte(pdev, pos + 3, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + return (byte & HT_3BIT_CAP_MASK) ? + HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG; + case PCI_CAP_ID_SATA: + ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + byte &= PCI_SATA_REGS_MASK; + if (byte == PCI_SATA_REGS_INLINE) + return PCI_SATA_SIZEOF_LONG; + else + return PCI_SATA_SIZEOF_SHORT; + default: + pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n", + dev_name(&pdev->dev), __func__, cap, pos); + } + + return 0; +} + +static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos) +{ + struct pci_dev *pdev = vdev->pdev; + u8 byte; + u32 dword; + int ret; + + switch (ecap) { + case PCI_EXT_CAP_ID_VNDR: + ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword); + if (ret) + return pcibios_err_to_errno(ret); + + return dword >> PCI_VSEC_HDR_LEN_SHIFT; + case PCI_EXT_CAP_ID_VC: + case PCI_EXT_CAP_ID_VC9: + case PCI_EXT_CAP_ID_MFVC: + return vfio_vc_cap_len(vdev, epos); + case PCI_EXT_CAP_ID_ACS: + ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + if (byte & PCI_ACS_EC) { + int bits; + + ret = pci_read_config_byte(pdev, + epos + PCI_ACS_EGRESS_BITS, + &byte); + if (ret) + return pcibios_err_to_errno(ret); + + bits = byte ? 
round_up(byte, 32) : 256; + return 8 + (bits / 8); + } + return 8; + + case PCI_EXT_CAP_ID_REBAR: + ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + byte &= PCI_REBAR_CTRL_NBAR_MASK; + byte >>= PCI_REBAR_CTRL_NBAR_SHIFT; + + return 4 + (byte * 8); + case PCI_EXT_CAP_ID_DPA: + ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte); + if (ret) + return pcibios_err_to_errno(ret); + + byte &= PCI_DPA_CAP_SUBSTATE_MASK; + return PCI_DPA_BASE_SIZEOF + byte + 1; + case PCI_EXT_CAP_ID_TPH: + ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword); + if (ret) + return pcibios_err_to_errno(ret); + + if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) { + int sts; + + sts = dword & PCI_TPH_CAP_ST_MASK; + sts >>= PCI_TPH_CAP_ST_SHIFT; + return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2; + } + return PCI_TPH_BASE_SIZEOF; + default: + pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n", + dev_name(&pdev->dev), __func__, ecap, epos); + } + + return 0; +} + +static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev, + int offset, int size) +{ + struct pci_dev *pdev = vdev->pdev; + int ret = 0; + + /* + * We try to read physical config space in the largest chunks + * we can, assuming that all of the fields support dword access. + * pci_save_state() makes this same assumption and seems to do ok. + */ + while (size) { + int filled; + + if (size >= 4 && !(offset % 4)) { + __le32 *dwordp = (__le32 *)&vdev->vconfig[offset]; + u32 dword; + + ret = pci_read_config_dword(pdev, offset, &dword); + if (ret) + return ret; + *dwordp = cpu_to_le32(dword); + filled = 4; + } else if (size >= 2 && !(offset % 2)) { + __le16 *wordp = (__le16 *)&vdev->vconfig[offset]; + u16 word; + + ret = pci_read_config_word(pdev, offset, &word); + if (ret) + return ret; + *wordp = cpu_to_le16(word); + filled = 2; + } else { + u8 *byte = &vdev->vconfig[offset]; + ret = pci_read_config_byte(pdev, offset, byte); + if (ret) + return ret; + filled = 1; + } + + offset += filled; + size -= filled; + } + + return ret; +} + +static int vfio_cap_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u8 *map = vdev->pci_config_map; + u16 status; + u8 pos, *prev, cap; + int loops, ret, caps = 0; + + /* Any capabilities? */ + ret = pci_read_config_word(pdev, PCI_STATUS, &status); + if (ret) + return ret; + + if (!(status & PCI_STATUS_CAP_LIST)) + return 0; /* Done */ + + ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos); + if (ret) + return ret; + + /* Mark the previous position in case we want to skip a capability */ + prev = &vdev->vconfig[PCI_CAPABILITY_LIST]; + + /* We can bound our loop, capabilities are dword aligned */ + loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF; + while (pos && loops--) { + u8 next; + int i, len = 0; + + ret = pci_read_config_byte(pdev, pos, &cap); + if (ret) + return ret; + + ret = pci_read_config_byte(pdev, + pos + PCI_CAP_LIST_NEXT, &next); + if (ret) + return ret; + + /* + * ID 0 is a NULL capability, conflicting with our fake + * PCI_CAP_ID_BASIC. As it has no content, consider it + * hidden for now. + */ + if (cap && cap <= PCI_CAP_ID_MAX) { + len = pci_cap_length[cap]; + if (len == 0xFF) { /* Variable length */ + len = vfio_cap_len(vdev, cap, pos); + if (len < 0) + return len; + } + } + + if (!len) { + pr_info("%s: %s hiding cap 0x%x\n", + __func__, dev_name(&pdev->dev), cap); + *prev = next; + pos = next; + continue; + } + + /* Sanity check, do we overlap other capabilities? 
*/ + for (i = 0; i < len; i++) { + if (likely(map[pos + i] == PCI_CAP_ID_INVALID)) + continue; + + pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n", + __func__, dev_name(&pdev->dev), + pos + i, map[pos + i], cap); + } + + BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); + + memset(map + pos, cap, len); + ret = vfio_fill_vconfig_bytes(vdev, pos, len); + if (ret) + return ret; + + prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT]; + pos = next; + caps++; + } + + /* If we didn't fill any capabilities, clear the status flag */ + if (!caps) { + __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS]; + *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST); + } + + return 0; +} + +static int vfio_ecap_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u8 *map = vdev->pci_config_map; + u16 epos; + __le32 *prev = NULL; + int loops, ret, ecaps = 0; + + if (!vdev->extended_caps) + return 0; + + epos = PCI_CFG_SPACE_SIZE; + + loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF; + + while (loops-- && epos >= PCI_CFG_SPACE_SIZE) { + u32 header; + u16 ecap; + int i, len = 0; + bool hidden = false; + + ret = pci_read_config_dword(pdev, epos, &header); + if (ret) + return ret; + + ecap = PCI_EXT_CAP_ID(header); + + if (ecap <= PCI_EXT_CAP_ID_MAX) { + len = pci_ext_cap_length[ecap]; + if (len == 0xFF) { + len = vfio_ext_cap_len(vdev, ecap, epos); + if (len < 0) + return len; + } + } + + if (!len) { + pr_info("%s: %s hiding ecap 0x%x@0x%x\n", + __func__, dev_name(&pdev->dev), ecap, epos); + + /* If not the first in the chain, we can skip over it */ + if (prev) { + u32 val = epos = PCI_EXT_CAP_NEXT(header); + *prev &= cpu_to_le32(~(0xffcU << 20)); + *prev |= cpu_to_le32(val << 20); + continue; + } + + /* + * Otherwise, fill in a placeholder, the direct + * readfn will virtualize this automatically + */ + len = PCI_CAP_SIZEOF; + hidden = true; + } + + for (i = 0; i < len; i++) { + if (likely(map[epos + i] == PCI_CAP_ID_INVALID)) + continue; + + pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n", + __func__, dev_name(&pdev->dev), + epos + i, map[epos + i], ecap); + } + + /* + * Even though ecap is 2 bytes, we're currently a long way + * from exceeding 1 byte capabilities. If we ever make it + * up to 0xFE we'll need to up this to a two-byte, byte map. + */ + BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); + + memset(map + epos, ecap, len); + ret = vfio_fill_vconfig_bytes(vdev, epos, len); + if (ret) + return ret; + + /* + * If we're just using this capability to anchor the list, + * hide the real ID. Only count real ecaps. XXX PCI spec + * indicates to use cap id = 0, version = 0, next = 0 if + * ecaps are absent, hope users check all the way to next. + */ + if (hidden) + *(__le32 *)&vdev->vconfig[epos] &= + cpu_to_le32((0xffcU << 20)); + else + ecaps++; + + prev = (__le32 *)&vdev->vconfig[epos]; + epos = PCI_EXT_CAP_NEXT(header); + } + + if (!ecaps) + *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0; + + return 0; +} + +/* + * Nag about hardware bugs, hopefully to have vendors fix them, but at least + * to collect a list of dependencies for the VF INTx pin quirk below. + */ +static const struct pci_device_id known_bogus_vf_intx_pin[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) }, + {} +}; + +/* + * For each device we allocate a pci_config_map that indicates the + * capability occupying each dword and thus the struct perm_bits we + * use for read and write. 
We also allocate a virtualized config + * space which tracks reads and writes to bits that we emulate for + * the user. Initial values filled from device. + * + * Using shared struct perm_bits between all vfio-pci devices saves + * us from allocating cfg_size buffers for virt and write for every + * device. We could remove vconfig and allocate individual buffers + * for each area requiring emulated bits, but the array of pointers + * would be comparable in size (at least for standard config space). + */ +int vfio_config_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u8 *map, *vconfig; + int ret; + + /* + * Config space, caps and ecaps are all dword aligned, so we could + * use one byte per dword to record the type. However, there are + * no requirements on the length of a capability, so the gap between + * capabilities needs byte granularity. + */ + map = kmalloc(pdev->cfg_size, GFP_KERNEL); + if (!map) + return -ENOMEM; + + vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL); + if (!vconfig) { + kfree(map); + return -ENOMEM; + } + + vdev->pci_config_map = map; + vdev->vconfig = vconfig; + + memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF); + memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID, + pdev->cfg_size - PCI_STD_HEADER_SIZEOF); + + ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF); + if (ret) + goto out; + + vdev->bardirty = true; + + /* + * XXX can we just pci_load_saved_state/pci_restore_state? + * may need to rebuild vconfig after that + */ + + /* For restore after reset */ + vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]); + vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]); + vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]); + vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]); + vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]); + vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]); + vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]); + + if (pdev->is_virtfn) { + *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor); + *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); + + /* + * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register + * does not apply to VFs and VFs must implement this register + * as read-only with value zero. Userspace is not readily able + * to identify whether a device is a VF and thus that the pin + * definition on the device is bogus should it violate this + * requirement. We already virtualize the pin register for + * other purposes, so we simply need to replace the bogus value + * and consider VFs when we determine INTx IRQ count. + */ + if (vconfig[PCI_INTERRUPT_PIN] && + !pci_match_id(known_bogus_vf_intx_pin, pdev)) + pci_warn(pdev, + "Hardware bug: VF reports bogus INTx pin %d\n", + vconfig[PCI_INTERRUPT_PIN]); + + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do not implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here.
+ */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); + } + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) + vconfig[PCI_INTERRUPT_PIN] = 0; + + ret = vfio_cap_init(vdev); + if (ret) + goto out; + + ret = vfio_ecap_init(vdev); + if (ret) + goto out; + + return 0; + +out: + kfree(map); + vdev->pci_config_map = NULL; + kfree(vconfig); + vdev->vconfig = NULL; + return pcibios_err_to_errno(ret); +} + +void vfio_config_free(struct vfio_pci_device *vdev) +{ + kfree(vdev->vconfig); + vdev->vconfig = NULL; + kfree(vdev->pci_config_map); + vdev->pci_config_map = NULL; + if (vdev->msi_perm) { + free_perm_bits(vdev->msi_perm); + kfree(vdev->msi_perm); + vdev->msi_perm = NULL; + } +} + +/* + * Find the remaining number of bytes in a dword that match the given + * position. Stop at either the end of the capability or the dword boundary. + */ +static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev, + loff_t pos) +{ + u8 cap = vdev->pci_config_map[pos]; + size_t i; + + for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++) + /* nop */; + + return i; +} + +static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + struct pci_dev *pdev = vdev->pdev; + struct perm_bits *perm; + __le32 val = 0; + int cap_start = 0, offset; + u8 cap_id; + ssize_t ret; + + if (*ppos < 0 || *ppos >= pdev->cfg_size || + *ppos + count > pdev->cfg_size) + return -EFAULT; + + /* + * Chop accesses into aligned chunks containing no more than a + * single capability. Caller increments to the next chunk. + */ + count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos)); + if (count >= 4 && !(*ppos % 4)) + count = 4; + else if (count >= 2 && !(*ppos % 2)) + count = 2; + else + count = 1; + + ret = count; + + cap_id = vdev->pci_config_map[*ppos]; + + if (cap_id == PCI_CAP_ID_INVALID) { + perm = &unassigned_perms; + cap_start = *ppos; + } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) { + perm = &virt_perms; + cap_start = *ppos; + } else { + if (*ppos >= PCI_CFG_SPACE_SIZE) { + WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); + + perm = &ecap_perms[cap_id]; + cap_start = vfio_find_cap_start(vdev, *ppos); + } else { + WARN_ON(cap_id > PCI_CAP_ID_MAX); + + perm = &cap_perms[cap_id]; + + if (cap_id == PCI_CAP_ID_MSI) + perm = vdev->msi_perm; + + if (cap_id > PCI_CAP_ID_BASIC) + cap_start = vfio_find_cap_start(vdev, *ppos); + } + } + + WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC); + WARN_ON(cap_start > *ppos); + + offset = *ppos - cap_start; + + if (iswrite) { + if (!perm->writefn) + return ret; + + if (copy_from_user(&val, buf, count)) + return -EFAULT; + + ret = perm->writefn(vdev, *ppos, count, perm, offset, val); + } else { + if (perm->readfn) { + ret = perm->readfn(vdev, *ppos, count, + perm, offset, &val); + if (ret < 0) + return ret; + } + + if (copy_to_user(buf, &val, count)) + return -EFAULT; + } + + return ret; +} + +ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + size_t done = 0; + int ret = 0; + loff_t pos = *ppos; + + pos &= VFIO_PCI_OFFSET_MASK; + + while (count) { + ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite); + if (ret < 0) + return ret; + + count -= ret; + done += ret; + buf += ret; + pos += ret; + } + + *ppos += done; + + return done; +} diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c new file mode 100644 index 000000000..6394b168e --- /dev/null +++ 
b/drivers/vfio/pci/vfio_pci_igd.c @@ -0,0 +1,280 @@ +/* + * VFIO PCI Intel Graphics support + * + * Copyright (C) 2016 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Register a device specific region through which to provide read-only + * access to the Intel IGD opregion. The register defining the opregion + * address is also virtualized to prevent user modification. + */ + +#include <linux/io.h> +#include <linux/pci.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> + +#include "vfio_pci_private.h" + +#define OPREGION_SIGNATURE "IntelGraphicsMem" +#define OPREGION_SIZE (8 * 1024) +#define OPREGION_PCI_ADDR 0xfc + +static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; + void *base = vdev->region[i].data; + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + + if (pos >= vdev->region[i].size || iswrite) + return -EINVAL; + + count = min(count, (size_t)(vdev->region[i].size - pos)); + + if (copy_to_user(buf, base + pos, count)) + return -EFAULT; + + *ppos += count; + + return count; +} + +static void vfio_pci_igd_release(struct vfio_pci_device *vdev, + struct vfio_pci_region *region) +{ + memunmap(region->data); +} + +static const struct vfio_pci_regops vfio_pci_igd_regops = { + .rw = vfio_pci_igd_rw, + .release = vfio_pci_igd_release, +}; + +static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev) +{ + __le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR); + u32 addr, size; + void *base; + int ret; + + ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr); + if (ret) + return ret; + + if (!addr || !(~addr)) + return -ENODEV; + + base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB); + if (!base) + return -ENOMEM; + + if (memcmp(base, OPREGION_SIGNATURE, 16)) { + memunmap(base); + return -EINVAL; + } + + size = le32_to_cpu(*(__le32 *)(base + 16)); + if (!size) { + memunmap(base); + return -EINVAL; + } + + size *= 1024; /* In KB */ + + if (size != OPREGION_SIZE) { + memunmap(base); + base = memremap(addr, size, MEMREMAP_WB); + if (!base) + return -ENOMEM; + } + + ret = vfio_pci_register_dev_region(vdev, + PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, + VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, + &vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base); + if (ret) { + memunmap(base); + return ret; + } + + /* Fill vconfig with the hw value and virtualize register */ + *dwordp = cpu_to_le32(addr); + memset(vdev->pci_config_map + OPREGION_PCI_ADDR, + PCI_CAP_ID_INVALID_VIRT, 4); + + return ret; +} + +static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev, + char __user *buf, size_t count, loff_t *ppos, + bool iswrite) +{ + unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; + struct pci_dev *pdev = vdev->region[i].data; + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + size_t size; + int ret; + + if (pos >= vdev->region[i].size || iswrite) + return -EINVAL; + + size = count = min(count, (size_t)(vdev->region[i].size - pos)); + + if ((pos & 1) && size) { + u8 val; + + ret = pci_user_read_config_byte(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + if (copy_to_user(buf + count - size, &val, 1)) + return -EFAULT; + + pos++; + 
size--; + } + + if ((pos & 3) && size > 2) { + u16 val; + + ret = pci_user_read_config_word(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + val = cpu_to_le16(val); + if (copy_to_user(buf + count - size, &val, 2)) + return -EFAULT; + + pos += 2; + size -= 2; + } + + while (size > 3) { + u32 val; + + ret = pci_user_read_config_dword(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + val = cpu_to_le32(val); + if (copy_to_user(buf + count - size, &val, 4)) + return -EFAULT; + + pos += 4; + size -= 4; + } + + while (size >= 2) { + u16 val; + + ret = pci_user_read_config_word(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + val = cpu_to_le16(val); + if (copy_to_user(buf + count - size, &val, 2)) + return -EFAULT; + + pos += 2; + size -= 2; + } + + while (size) { + u8 val; + + ret = pci_user_read_config_byte(pdev, pos, &val); + if (ret) + return pcibios_err_to_errno(ret); + + if (copy_to_user(buf + count - size, &val, 1)) + return -EFAULT; + + pos++; + size--; + } + + *ppos += count; + + return count; +} + +static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev, + struct vfio_pci_region *region) +{ + struct pci_dev *pdev = region->data; + + pci_dev_put(pdev); +} + +static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = { + .rw = vfio_pci_igd_cfg_rw, + .release = vfio_pci_igd_cfg_release, +}; + +static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev) +{ + struct pci_dev *host_bridge, *lpc_bridge; + int ret; + + host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + if (!host_bridge) + return -ENODEV; + + if (host_bridge->vendor != PCI_VENDOR_ID_INTEL || + host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) { + pci_dev_put(host_bridge); + return -EINVAL; + } + + ret = vfio_pci_register_dev_region(vdev, + PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, + VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, + &vfio_pci_igd_cfg_regops, host_bridge->cfg_size, + VFIO_REGION_INFO_FLAG_READ, host_bridge); + if (ret) { + pci_dev_put(host_bridge); + return ret; + } + + lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0)); + if (!lpc_bridge) + return -ENODEV; + + if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL || + lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) { + pci_dev_put(lpc_bridge); + return -EINVAL; + } + + ret = vfio_pci_register_dev_region(vdev, + PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, + VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, + &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size, + VFIO_REGION_INFO_FLAG_READ, lpc_bridge); + if (ret) { + pci_dev_put(lpc_bridge); + return ret; + } + + return 0; +} + +int vfio_pci_igd_init(struct vfio_pci_device *vdev) +{ + int ret; + + ret = vfio_pci_igd_opregion_init(vdev); + if (ret) + return ret; + + ret = vfio_pci_igd_cfg_init(vdev); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c new file mode 100644 index 000000000..c989f777b --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -0,0 +1,696 @@ +/* + * VFIO PCI interrupt handling + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. 
+ * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/eventfd.h> +#include <linux/msi.h> +#include <linux/pci.h> +#include <linux/file.h> +#include <linux/vfio.h> +#include <linux/wait.h> +#include <linux/slab.h> + +#include "vfio_pci_private.h" + +/* + * INTx + */ +static void vfio_send_intx_eventfd(void *opaque, void *unused) +{ + struct vfio_pci_device *vdev = opaque; + + if (likely(is_intx(vdev) && !vdev->virq_disabled)) + eventfd_signal(vdev->ctx[0].trigger, 1); +} + +void vfio_pci_intx_mask(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + unsigned long flags; + + spin_lock_irqsave(&vdev->irqlock, flags); + + /* + * Masking can come from interrupt, ioctl, or config space + * via INTx disable. The latter means this can get called + * even when not using intx delivery. In this case, just + * try to have the physical bit follow the virtual bit. + */ + if (unlikely(!is_intx(vdev))) { + if (vdev->pci_2_3) + pci_intx(pdev, 0); + } else if (!vdev->ctx[0].masked) { + /* + * Can't use check_and_mask here because we always want to + * mask, not just when something is pending. + */ + if (vdev->pci_2_3) + pci_intx(pdev, 0); + else + disable_irq_nosync(pdev->irq); + + vdev->ctx[0].masked = true; + } + + spin_unlock_irqrestore(&vdev->irqlock, flags); +} + +/* + * If this is triggered by an eventfd, we can't call eventfd_signal + * or else we'll deadlock on the eventfd wait queue. Return >0 when + * a signal is necessary, which can then be handled via a work queue + * or directly depending on the caller. + */ +static int vfio_pci_intx_unmask_handler(void *opaque, void *unused) +{ + struct vfio_pci_device *vdev = opaque; + struct pci_dev *pdev = vdev->pdev; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&vdev->irqlock, flags); + + /* + * Unmasking comes from ioctl or config, so again, have the + * physical bit follow the virtual even when not using INTx. + */ + if (unlikely(!is_intx(vdev))) { + if (vdev->pci_2_3) + pci_intx(pdev, 1); + } else if (vdev->ctx[0].masked && !vdev->virq_disabled) { + /* + * A pending interrupt here would immediately trigger, + * but we can avoid that overhead by just re-sending + * the interrupt to the user. 
+ */ + if (vdev->pci_2_3) { + if (!pci_check_and_unmask_intx(pdev)) + ret = 1; + } else + enable_irq(pdev->irq); + + vdev->ctx[0].masked = (ret > 0); + } + + spin_unlock_irqrestore(&vdev->irqlock, flags); + + return ret; +} + +void vfio_pci_intx_unmask(struct vfio_pci_device *vdev) +{ + if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0) + vfio_send_intx_eventfd(vdev, NULL); +} + +static irqreturn_t vfio_intx_handler(int irq, void *dev_id) +{ + struct vfio_pci_device *vdev = dev_id; + unsigned long flags; + int ret = IRQ_NONE; + + spin_lock_irqsave(&vdev->irqlock, flags); + + if (!vdev->pci_2_3) { + disable_irq_nosync(vdev->pdev->irq); + vdev->ctx[0].masked = true; + ret = IRQ_HANDLED; + } else if (!vdev->ctx[0].masked && /* may be shared */ + pci_check_and_mask_intx(vdev->pdev)) { + vdev->ctx[0].masked = true; + ret = IRQ_HANDLED; + } + + spin_unlock_irqrestore(&vdev->irqlock, flags); + + if (ret == IRQ_HANDLED) + vfio_send_intx_eventfd(vdev, NULL); + + return ret; +} + +static int vfio_intx_enable(struct vfio_pci_device *vdev) +{ + if (!is_irq_none(vdev)) + return -EINVAL; + + if (!vdev->pdev->irq) + return -ENODEV; + + vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + if (!vdev->ctx) + return -ENOMEM; + + vdev->num_ctx = 1; + + /* + * If the virtual interrupt is masked, restore it. Devices + * supporting DisINTx can be masked at the hardware level + * here, non-PCI-2.3 devices will have to wait until the + * interrupt is enabled. + */ + vdev->ctx[0].masked = vdev->virq_disabled; + if (vdev->pci_2_3) + pci_intx(vdev->pdev, !vdev->ctx[0].masked); + + vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; + + return 0; +} + +static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) +{ + struct pci_dev *pdev = vdev->pdev; + unsigned long irqflags = IRQF_SHARED; + struct eventfd_ctx *trigger; + unsigned long flags; + int ret; + + if (vdev->ctx[0].trigger) { + free_irq(pdev->irq, vdev); + kfree(vdev->ctx[0].name); + eventfd_ctx_put(vdev->ctx[0].trigger); + vdev->ctx[0].trigger = NULL; + } + + if (fd < 0) /* Disable only */ + return 0; + + vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", + pci_name(pdev)); + if (!vdev->ctx[0].name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(vdev->ctx[0].name); + return PTR_ERR(trigger); + } + + vdev->ctx[0].trigger = trigger; + + if (!vdev->pci_2_3) + irqflags = 0; + + ret = request_irq(pdev->irq, vfio_intx_handler, + irqflags, vdev->ctx[0].name, vdev); + if (ret) { + vdev->ctx[0].trigger = NULL; + kfree(vdev->ctx[0].name); + eventfd_ctx_put(trigger); + return ret; + } + + /* + * INTx disable will stick across the new irq setup, + * disable_irq won't. + */ + spin_lock_irqsave(&vdev->irqlock, flags); + if (!vdev->pci_2_3 && vdev->ctx[0].masked) + disable_irq_nosync(pdev->irq); + spin_unlock_irqrestore(&vdev->irqlock, flags); + + return 0; +} + +static void vfio_intx_disable(struct vfio_pci_device *vdev) +{ + vfio_virqfd_disable(&vdev->ctx[0].unmask); + vfio_virqfd_disable(&vdev->ctx[0].mask); + vfio_intx_set_signal(vdev, -1); + vdev->irq_type = VFIO_PCI_NUM_IRQS; + vdev->num_ctx = 0; + kfree(vdev->ctx); +} + +/* + * MSI/MSI-X + */ +static irqreturn_t vfio_msihandler(int irq, void *arg) +{ + struct eventfd_ctx *trigger = arg; + + eventfd_signal(trigger, 1); + return IRQ_HANDLED; +} + +static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) +{ + struct pci_dev *pdev = vdev->pdev; + unsigned int flag = msix ? 
PCI_IRQ_MSIX : PCI_IRQ_MSI; + int ret; + u16 cmd; + + if (!is_irq_none(vdev)) + return -EINVAL; + + vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + if (!vdev->ctx) + return -ENOMEM; + + /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); + if (ret < nvec) { + if (ret > 0) + pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx); + return ret; + } + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + vdev->num_ctx = nvec; + vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : + VFIO_PCI_MSI_IRQ_INDEX; + + if (!msix) { + /* + * Compute the virtual hardware field for max msi vectors - + * it is the log base 2 of the number of vectors. + */ + vdev->msi_qmax = fls(nvec * 2 - 1) - 1; + } + + return 0; +} + +static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + int vector, int fd, bool msix) +{ + struct pci_dev *pdev = vdev->pdev; + struct eventfd_ctx *trigger; + int irq, ret; + u16 cmd; + + if (vector < 0 || vector >= vdev->num_ctx) + return -EINVAL; + + irq = pci_irq_vector(pdev, vector); + + if (vdev->ctx[vector].trigger) { + irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); + free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + kfree(vdev->ctx[vector].name); + eventfd_ctx_put(vdev->ctx[vector].trigger); + vdev->ctx[vector].trigger = NULL; + } + + if (fd < 0) + return 0; + + vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)", + msix ? "x" : "", vector, + pci_name(pdev)); + if (!vdev->ctx[vector].name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(vdev->ctx[vector].name); + return PTR_ERR(trigger); + } + + /* + * The MSIx vector table resides in device memory which may be cleared + * via backdoor resets. We don't allow direct access to the vector + * table so even if a userspace driver attempts to save/restore around + * such a reset it would be unsuccessful. To avoid this, restore the + * cached value of the message prior to enabling. + */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + if (msix) { + struct msi_msg msg; + + get_cached_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); + } + + ret = request_irq(irq, vfio_msihandler, 0, + vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + if (ret) { + kfree(vdev->ctx[vector].name); + eventfd_ctx_put(trigger); + return ret; + } + + vdev->ctx[vector].producer.token = trigger; + vdev->ctx[vector].producer.irq = irq; + ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); + if (unlikely(ret)) { + dev_info(&pdev->dev, + "irq bypass producer (token %p) registration fails: %d\n", + vdev->ctx[vector].producer.token, ret); + + vdev->ctx[vector].producer.token = NULL; + } + vdev->ctx[vector].trigger = trigger; + + return 0; +} + +static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start, + unsigned count, int32_t *fds, bool msix) +{ + int i, j, ret = 0; + + if (start >= vdev->num_ctx || start + count > vdev->num_ctx) + return -EINVAL; + + for (i = 0, j = start; i < count && !ret; i++, j++) { + int fd = fds ? 
fds[i] : -1; + ret = vfio_msi_set_vector_signal(vdev, j, fd, msix); + } + + if (ret) { + for (--j; j >= (int)start; j--) + vfio_msi_set_vector_signal(vdev, j, -1, msix); + } + + return ret; +} + +static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) +{ + struct pci_dev *pdev = vdev->pdev; + int i; + u16 cmd; + + for (i = 0; i < vdev->num_ctx; i++) { + vfio_virqfd_disable(&vdev->ctx[i].unmask); + vfio_virqfd_disable(&vdev->ctx[i].mask); + } + + vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + + cmd = vfio_pci_memory_lock_and_enable(vdev); + pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + + /* + * Both disable paths above use pci_intx_for_msi() to clear DisINTx + * via their shutdown paths. Restore for NoINTx devices. + */ + if (vdev->nointx) + pci_intx(pdev, 0); + + vdev->irq_type = VFIO_PCI_NUM_IRQS; + vdev->num_ctx = 0; + kfree(vdev->ctx); +} + +/* + * IOCTL support + */ +static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (!is_intx(vdev) || start != 0 || count != 1) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_pci_intx_unmask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t unmask = *(uint8_t *)data; + if (unmask) + vfio_pci_intx_unmask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + if (fd >= 0) + return vfio_virqfd_enable((void *) vdev, + vfio_pci_intx_unmask_handler, + vfio_send_intx_eventfd, NULL, + &vdev->ctx[0].unmask, fd); + + vfio_virqfd_disable(&vdev->ctx[0].unmask); + } + + return 0; +} + +static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (!is_intx(vdev) || start != 0 || count != 1) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_pci_intx_mask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t mask = *(uint8_t *)data; + if (mask) + vfio_pci_intx_mask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + return -ENOTTY; /* XXX implement me */ + } + + return 0; +} + +static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) { + vfio_intx_disable(vdev); + return 0; + } + + if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + int ret; + + if (is_intx(vdev)) + return vfio_intx_set_signal(vdev, fd); + + ret = vfio_intx_enable(vdev); + if (ret) + return ret; + + ret = vfio_intx_set_signal(vdev, fd); + if (ret) + vfio_intx_disable(vdev); + + return ret; + } + + if (!is_intx(vdev)) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_send_intx_eventfd(vdev, NULL); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t trigger = *(uint8_t *)data; + if (trigger) + vfio_send_intx_eventfd(vdev, NULL); + } + return 0; +} + +static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + int i; + bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? 
true : false; + + if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) { + vfio_msi_disable(vdev, msix); + return 0; + } + + if (!(irq_is(vdev, index) || is_irq_none(vdev))) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t *fds = data; + int ret; + + if (vdev->irq_type == index) + return vfio_msi_set_block(vdev, start, count, + fds, msix); + + ret = vfio_msi_enable(vdev, start + count, msix); + if (ret) + return ret; + + ret = vfio_msi_set_block(vdev, start, count, fds, msix); + if (ret) + vfio_msi_disable(vdev, msix); + + return ret; + } + + if (!irq_is(vdev, index) || start + count > vdev->num_ctx) + return -EINVAL; + + for (i = start; i < start + count; i++) { + if (!vdev->ctx[i].trigger) + continue; + if (flags & VFIO_IRQ_SET_DATA_NONE) { + eventfd_signal(vdev->ctx[i].trigger, 1); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t *bools = data; + if (bools[i - start]) + eventfd_signal(vdev->ctx[i].trigger, 1); + } + } + return 0; +} + +static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, + unsigned int count, uint32_t flags, + void *data) +{ + /* DATA_NONE/DATA_BOOL enables loopback testing */ + if (flags & VFIO_IRQ_SET_DATA_NONE) { + if (*ctx) { + if (count) { + eventfd_signal(*ctx, 1); + } else { + eventfd_ctx_put(*ctx); + *ctx = NULL; + } + return 0; + } + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t trigger; + + if (!count) + return -EINVAL; + + trigger = *(uint8_t *)data; + if (trigger && *ctx) + eventfd_signal(*ctx, 1); + + return 0; + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd; + + if (!count) + return -EINVAL; + + fd = *(int32_t *)data; + if (fd == -1) { + if (*ctx) + eventfd_ctx_put(*ctx); + *ctx = NULL; + } else if (fd >= 0) { + struct eventfd_ctx *efdctx; + + efdctx = eventfd_ctx_fdget(fd); + if (IS_ERR(efdctx)) + return PTR_ERR(efdctx); + + if (*ctx) + eventfd_ctx_put(*ctx); + + *ctx = efdctx; + } + return 0; + } + + return -EINVAL; +} + +static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) + return -EINVAL; + + return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, + count, flags, data); +} + +static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1) + return -EINVAL; + + return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, + count, flags, data); +} + +int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, + unsigned index, unsigned start, unsigned count, + void *data) +{ + int (*func)(struct vfio_pci_device *vdev, unsigned index, + unsigned start, unsigned count, uint32_t flags, + void *data) = NULL; + + switch (index) { + case VFIO_PCI_INTX_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + func = vfio_pci_set_intx_mask; + break; + case VFIO_IRQ_SET_ACTION_UNMASK: + func = vfio_pci_set_intx_unmask; + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = vfio_pci_set_intx_trigger; + break; + } + break; + case VFIO_PCI_MSI_IRQ_INDEX: + case VFIO_PCI_MSIX_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + case VFIO_IRQ_SET_ACTION_UNMASK: + /* XXX Need masking support exported */ + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = 
vfio_pci_set_msi_trigger; + break; + } + break; + case VFIO_PCI_ERR_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_TRIGGER: + if (pci_is_pcie(vdev->pdev)) + func = vfio_pci_set_err_trigger; + break; + } + break; + case VFIO_PCI_REQ_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = vfio_pci_set_req_trigger; + break; + } + break; + } + + if (!func) + return -ENOTTY; + + return func(vdev, index, start, count, flags, data); +} diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h new file mode 100644 index 000000000..17d2bae5b --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/irqbypass.h> +#include <linux/types.h> + +#ifndef VFIO_PCI_PRIVATE_H +#define VFIO_PCI_PRIVATE_H + +#define VFIO_PCI_OFFSET_SHIFT 40 + +#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) + +/* Special capability IDs predefined access */ +#define PCI_CAP_ID_INVALID 0xFF /* default raw access */ +#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */ + +/* Cap maximum number of ioeventfds per device (arbitrary) */ +#define VFIO_PCI_IOEVENTFD_MAX 1000 + +struct vfio_pci_ioeventfd { + struct list_head next; + struct virqfd *virqfd; + void __iomem *addr; + uint64_t data; + loff_t pos; + int bar; + int count; +}; + +struct vfio_pci_irq_ctx { + struct eventfd_ctx *trigger; + struct virqfd *unmask; + struct virqfd *mask; + char *name; + bool masked; + struct irq_bypass_producer producer; +}; + +struct vfio_pci_device; +struct vfio_pci_region; + +struct vfio_pci_regops { + size_t (*rw)(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + void (*release)(struct vfio_pci_device *vdev, + struct vfio_pci_region *region); +}; + +struct vfio_pci_region { + u32 type; + u32 subtype; + const struct vfio_pci_regops *ops; + void *data; + size_t size; + u32 flags; +}; + +struct vfio_pci_dummy_resource { + struct resource resource; + int index; + struct list_head res_next; +}; + +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +struct vfio_pci_device { + struct pci_dev *pdev; + void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; + bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1]; + u8 *pci_config_map; + u8 *vconfig; + struct perm_bits *msi_perm; + spinlock_t irqlock; + struct mutex igate; + struct vfio_pci_irq_ctx *ctx; + int num_ctx; + int irq_type; + int num_regions; + struct vfio_pci_region *region; + u8 msi_qmax; + u8 msix_bar; + u16 msix_size; + u32 msix_offset; + u32 rbar[7]; + bool pci_2_3; + bool virq_disabled; + bool reset_works; + bool extended_caps; + bool bardirty; + bool has_vga; + bool needs_reset; + bool nointx; + struct pci_saved_state *pci_saved_state; + int refcnt; + int ioeventfds_nr; + struct eventfd_ctx 
*err_trigger; + struct eventfd_ctx *req_trigger; + struct list_head dummy_resources_list; + struct mutex ioeventfds_lock; + struct list_head ioeventfds_list; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; +}; + +#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) +#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX) +#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX) +#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev))) +#define irq_is(vdev, type) (vdev->irq_type == type) + +extern void vfio_pci_intx_mask(struct vfio_pci_device *vdev); +extern void vfio_pci_intx_unmask(struct vfio_pci_device *vdev); + +extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, + uint32_t flags, unsigned index, + unsigned start, unsigned count, void *data); + +extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, + uint64_t data, int count, int fd); + +extern int vfio_pci_init_perm_bits(void); +extern void vfio_pci_uninit_perm_bits(void); + +extern int vfio_config_init(struct vfio_pci_device *vdev); +extern void vfio_config_free(struct vfio_pci_device *vdev); + +extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + +#ifdef CONFIG_VFIO_PCI_IGD +extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); +#else +static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev) +{ + return -ENODEV; +} +#endif +#endif /* VFIO_PCI_PRIVATE_H */ diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c new file mode 100644 index 000000000..3d0ec2bbe --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -0,0 +1,404 @@ +/* + * VFIO PCI I/O Port & MMIO access + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. 
+ * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/vfio.h> +#include <linux/vgaarb.h> + +#include "vfio_pci_private.h" + +#ifdef __LITTLE_ENDIAN +#define vfio_ioread64 ioread64 +#define vfio_iowrite64 iowrite64 +#define vfio_ioread32 ioread32 +#define vfio_iowrite32 iowrite32 +#define vfio_ioread16 ioread16 +#define vfio_iowrite16 iowrite16 +#else +#define vfio_ioread64 ioread64be +#define vfio_iowrite64 iowrite64be +#define vfio_ioread32 ioread32be +#define vfio_iowrite32 iowrite32be +#define vfio_ioread16 ioread16be +#define vfio_iowrite16 iowrite16be +#endif +#define vfio_ioread8 ioread8 +#define vfio_iowrite8 iowrite8 + +/* + * Read or write from an __iomem region (MMIO or I/O port) with an excluded + * range which is inaccessible. The excluded range drops writes and fills + * reads with -1. This is intended for handling MSI-X vector tables and + * leftover space for ROM BARs. + */ +static ssize_t do_io_rw(void __iomem *io, char __user *buf, + loff_t off, size_t count, size_t x_start, + size_t x_end, bool iswrite) +{ + ssize_t done = 0; + + while (count) { + size_t fillable, filled; + + if (off < x_start) + fillable = min(count, (size_t)(x_start - off)); + else if (off >= x_end) + fillable = count; + else + fillable = 0; + + if (fillable >= 4 && !(off % 4)) { + u32 val; + + if (iswrite) { + if (copy_from_user(&val, buf, 4)) + return -EFAULT; + + vfio_iowrite32(val, io + off); + } else { + val = vfio_ioread32(io + off); + + if (copy_to_user(buf, &val, 4)) + return -EFAULT; + } + + filled = 4; + } else if (fillable >= 2 && !(off % 2)) { + u16 val; + + if (iswrite) { + if (copy_from_user(&val, buf, 2)) + return -EFAULT; + + vfio_iowrite16(val, io + off); + } else { + val = vfio_ioread16(io + off); + + if (copy_to_user(buf, &val, 2)) + return -EFAULT; + } + + filled = 2; + } else if (fillable) { + u8 val; + + if (iswrite) { + if (copy_from_user(&val, buf, 1)) + return -EFAULT; + + vfio_iowrite8(val, io + off); + } else { + val = vfio_ioread8(io + off); + + if (copy_to_user(buf, &val, 1)) + return -EFAULT; + } + + filled = 1; + } else { + /* Fill reads with -1, drop writes */ + filled = min(count, (size_t)(x_end - off)); + if (!iswrite) { + u8 val = 0xFF; + size_t i; + + for (i = 0; i < filled; i++) + if (copy_to_user(buf + i, &val, 1)) + return -EFAULT; + } + } + + count -= filled; + done += filled; + off += filled; + buf += filled; + } + + return done; +} + +static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar) +{ + struct pci_dev *pdev = vdev->pdev; + int ret; + void __iomem *io; + + if (vdev->barmap[bar]) + return 0; + + ret = pci_request_selected_regions(pdev, 1 << bar, "vfio"); + if (ret) + return ret; + + io = pci_iomap(pdev, bar, 0); + if (!io) { + pci_release_selected_regions(pdev, 1 << bar); + return -ENOMEM; + } + + vdev->barmap[bar] = io; + + return 0; +} + +ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + struct pci_dev *pdev = vdev->pdev; + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + size_t x_start = 0, x_end = 0; + resource_size_t end; + void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; + ssize_t done; + + if (pci_resource_start(pdev, bar)) + end = pci_resource_len(pdev, bar); + else if (bar == PCI_ROM_RESOURCE && + pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW) + end = 0x20000; + else + return -EINVAL; + + if 
(pos >= end) + return -EINVAL; + + count = min(count, (size_t)(end - pos)); + + if (res->flags & IORESOURCE_MEM) { + down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + + if (bar == PCI_ROM_RESOURCE) { + /* + * The ROM can fill less space than the BAR, so we start the + * excluded range at the end of the actual ROM. This makes + * filling large ROM BARs much faster. + */ + io = pci_map_rom(pdev, &x_start); + if (!io) { + done = -ENOMEM; + goto out; + } + x_end = end; + } else { + int ret = vfio_pci_setup_barmap(vdev, bar); + if (ret) { + done = ret; + goto out; + } + + io = vdev->barmap[bar]; + } + + if (bar == vdev->msix_bar) { + x_start = vdev->msix_offset; + x_end = vdev->msix_offset + vdev->msix_size; + } + + done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite); + + if (done >= 0) + *ppos += done; + + if (bar == PCI_ROM_RESOURCE) + pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); + + return done; +} + +ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite) +{ + int ret; + loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK; + void __iomem *iomem = NULL; + unsigned int rsrc; + bool is_ioport; + ssize_t done; + + if (!vdev->has_vga) + return -EINVAL; + + if (pos > 0xbfffful) + return -EINVAL; + + switch ((u32)pos) { + case 0xa0000 ... 0xbffff: + count = min(count, (size_t)(0xc0000 - pos)); + iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1); + off = pos - 0xa0000; + rsrc = VGA_RSRC_LEGACY_MEM; + is_ioport = false; + break; + case 0x3b0 ... 0x3bb: + count = min(count, (size_t)(0x3bc - pos)); + iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1); + off = pos - 0x3b0; + rsrc = VGA_RSRC_LEGACY_IO; + is_ioport = true; + break; + case 0x3c0 ... 0x3df: + count = min(count, (size_t)(0x3e0 - pos)); + iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1); + off = pos - 0x3c0; + rsrc = VGA_RSRC_LEGACY_IO; + is_ioport = true; + break; + default: + return -EINVAL; + } + + if (!iomem) + return -ENOMEM; + + ret = vga_get_interruptible(vdev->pdev, rsrc); + if (ret) { + is_ioport ? ioport_unmap(iomem) : iounmap(iomem); + return ret; + } + + done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite); + + vga_put(vdev->pdev, rsrc); + + is_ioport ? 
ioport_unmap(iomem) : iounmap(iomem); + + if (done >= 0) + *ppos += done; + + return done; +} + +static int vfio_pci_ioeventfd_handler(void *opaque, void *unused) +{ + struct vfio_pci_ioeventfd *ioeventfd = opaque; + + switch (ioeventfd->count) { + case 1: + vfio_iowrite8(ioeventfd->data, ioeventfd->addr); + break; + case 2: + vfio_iowrite16(ioeventfd->data, ioeventfd->addr); + break; + case 4: + vfio_iowrite32(ioeventfd->data, ioeventfd->addr); + break; +#ifdef iowrite64 + case 8: + vfio_iowrite64(ioeventfd->data, ioeventfd->addr); + break; +#endif + } + + return 0; +} + +long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, + uint64_t data, int count, int fd) +{ + struct pci_dev *pdev = vdev->pdev; + loff_t pos = offset & VFIO_PCI_OFFSET_MASK; + int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset); + struct vfio_pci_ioeventfd *ioeventfd; + + /* Only support ioeventfds into BARs */ + if (bar > VFIO_PCI_BAR5_REGION_INDEX) + return -EINVAL; + + if (pos + count > pci_resource_len(pdev, bar)) + return -EINVAL; + + /* Disallow ioeventfds working around MSI-X table writes */ + if (bar == vdev->msix_bar && + !(pos + count <= vdev->msix_offset || + pos >= vdev->msix_offset + vdev->msix_size)) + return -EINVAL; + +#ifndef iowrite64 + if (count == 8) + return -EINVAL; +#endif + + ret = vfio_pci_setup_barmap(vdev, bar); + if (ret) + return ret; + + mutex_lock(&vdev->ioeventfds_lock); + + list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) { + if (ioeventfd->pos == pos && ioeventfd->bar == bar && + ioeventfd->data == data && ioeventfd->count == count) { + if (fd == -1) { + vfio_virqfd_disable(&ioeventfd->virqfd); + list_del(&ioeventfd->next); + vdev->ioeventfds_nr--; + kfree(ioeventfd); + ret = 0; + } else + ret = -EEXIST; + + goto out_unlock; + } + } + + if (fd < 0) { + ret = -ENODEV; + goto out_unlock; + } + + if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) { + ret = -ENOSPC; + goto out_unlock; + } + + ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL); + if (!ioeventfd) { + ret = -ENOMEM; + goto out_unlock; + } + + ioeventfd->addr = vdev->barmap[bar] + pos; + ioeventfd->data = data; + ioeventfd->pos = pos; + ioeventfd->bar = bar; + ioeventfd->count = count; + + ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler, + NULL, NULL, &ioeventfd->virqfd, fd); + if (ret) { + kfree(ioeventfd); + goto out_unlock; + } + + list_add(&ioeventfd->next, &vdev->ioeventfds_list); + vdev->ioeventfds_nr++; + +out_unlock: + mutex_unlock(&vdev->ioeventfds_lock); + + return ret; +} diff --git a/drivers/vfio/platform/Kconfig b/drivers/vfio/platform/Kconfig new file mode 100644 index 000000000..bb3012878 --- /dev/null +++ b/drivers/vfio/platform/Kconfig @@ -0,0 +1,22 @@ +config VFIO_PLATFORM + tristate "VFIO support for platform devices" + depends on VFIO && EVENTFD && (ARM || ARM64) + select VFIO_VIRQFD + help + Support for platform devices with VFIO. This is required to make + use of platform devices present on the system using the VFIO + framework. + + If you don't know what to do here, say N. + +config VFIO_AMBA + tristate "VFIO support for AMBA devices" + depends on VFIO_PLATFORM && ARM_AMBA + help + Support for ARM AMBA devices with VFIO. This is required to make + use of ARM AMBA devices present on the system using the VFIO + framework. + + If you don't know what to do here, say N. 
+ +source "drivers/vfio/platform/reset/Kconfig" diff --git a/drivers/vfio/platform/Makefile b/drivers/vfio/platform/Makefile new file mode 100644 index 000000000..3f3a24e7c --- /dev/null +++ b/drivers/vfio/platform/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +vfio-platform-base-y := vfio_platform_common.o vfio_platform_irq.o +vfio-platform-y := vfio_platform.o + +obj-$(CONFIG_VFIO_PLATFORM) += vfio-platform.o +obj-$(CONFIG_VFIO_PLATFORM) += vfio-platform-base.o +obj-$(CONFIG_VFIO_PLATFORM) += reset/ + +vfio-amba-y := vfio_amba.o + +obj-$(CONFIG_VFIO_AMBA) += vfio-amba.o +obj-$(CONFIG_VFIO_AMBA) += vfio-platform-base.o +obj-$(CONFIG_VFIO_AMBA) += reset/ diff --git a/drivers/vfio/platform/reset/Kconfig b/drivers/vfio/platform/reset/Kconfig new file mode 100644 index 000000000..392e3c09d --- /dev/null +++ b/drivers/vfio/platform/reset/Kconfig @@ -0,0 +1,24 @@ +config VFIO_PLATFORM_CALXEDAXGMAC_RESET + tristate "VFIO support for calxeda xgmac reset" + depends on VFIO_PLATFORM + help + Enables the VFIO platform driver to handle reset for Calxeda xgmac + + If you don't know what to do here, say N. + +config VFIO_PLATFORM_AMDXGBE_RESET + tristate "VFIO support for AMD XGBE reset" + depends on VFIO_PLATFORM + help + Enables the VFIO platform driver to handle reset for AMD XGBE + + If you don't know what to do here, say N. + +config VFIO_PLATFORM_BCMFLEXRM_RESET + tristate "VFIO support for Broadcom FlexRM reset" + depends on VFIO_PLATFORM && (ARCH_BCM_IPROC || COMPILE_TEST) + default ARCH_BCM_IPROC + help + Enables the VFIO platform driver to handle reset for Broadcom FlexRM + + If you don't know what to do here, say N. diff --git a/drivers/vfio/platform/reset/Makefile b/drivers/vfio/platform/reset/Makefile new file mode 100644 index 000000000..57abd4f0a --- /dev/null +++ b/drivers/vfio/platform/reset/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +vfio-platform-calxedaxgmac-y := vfio_platform_calxedaxgmac.o +vfio-platform-amdxgbe-y := vfio_platform_amdxgbe.o + +ccflags-y += -Idrivers/vfio/platform + +obj-$(CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET) += vfio-platform-calxedaxgmac.o +obj-$(CONFIG_VFIO_PLATFORM_AMDXGBE_RESET) += vfio-platform-amdxgbe.o +obj-$(CONFIG_VFIO_PLATFORM_BCMFLEXRM_RESET) += vfio_platform_bcmflexrm.o diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c new file mode 100644 index 000000000..bcd419cfd --- /dev/null +++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c @@ -0,0 +1,127 @@ +/* + * VFIO platform driver specialized for AMD xgbe reset + * reset code is inherited from AMD xgbe native driver + * + * Copyright (c) 2015 Linaro Ltd. + * www.linaro.org + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <uapi/linux/mdio.h> +#include <linux/delay.h> + +#include "vfio_platform_private.h" + +#define DMA_MR 0x3000 +#define MAC_VR 0x0110 +#define DMA_ISR 0x3008 +#define MAC_ISR 0x00b0 +#define PCS_MMD_SELECT 0xff +#define MDIO_AN_INT 0x8002 +#define MDIO_AN_INTMASK 0x8001 + +static unsigned int xmdio_read(void *ioaddr, unsigned int mmd, + unsigned int reg) +{ + unsigned int mmd_address, value; + + mmd_address = (mmd << 16) | ((reg) & 0xffff); + iowrite32(mmd_address >> 8, ioaddr + (PCS_MMD_SELECT << 2)); + value = ioread32(ioaddr + ((mmd_address & 0xff) << 2)); + return value; +} + +static void xmdio_write(void *ioaddr, unsigned int mmd, + unsigned int reg, unsigned int value) +{ + unsigned int mmd_address; + + mmd_address = (mmd << 16) | ((reg) & 0xffff); + iowrite32(mmd_address >> 8, ioaddr + (PCS_MMD_SELECT << 2)); + iowrite32(value, ioaddr + ((mmd_address & 0xff) << 2)); +} + +static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev) +{ + struct vfio_platform_region *xgmac_regs = &vdev->regions[0]; + struct vfio_platform_region *xpcs_regs = &vdev->regions[1]; + u32 dma_mr_value, pcs_value, value; + unsigned int count; + + if (!xgmac_regs->ioaddr) { + xgmac_regs->ioaddr = + ioremap_nocache(xgmac_regs->addr, xgmac_regs->size); + if (!xgmac_regs->ioaddr) + return -ENOMEM; + } + if (!xpcs_regs->ioaddr) { + xpcs_regs->ioaddr = + ioremap_nocache(xpcs_regs->addr, xpcs_regs->size); + if (!xpcs_regs->ioaddr) + return -ENOMEM; + } + + /* reset the PHY through MDIO*/ + pcs_value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_PCS, MDIO_CTRL1); + pcs_value |= MDIO_CTRL1_RESET; + xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_PCS, MDIO_CTRL1, pcs_value); + + count = 50; + do { + msleep(20); + pcs_value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_PCS, + MDIO_CTRL1); + } while ((pcs_value & MDIO_CTRL1_RESET) && --count); + + if (pcs_value & MDIO_CTRL1_RESET) + pr_warn("%s XGBE PHY reset timeout\n", __func__); + + /* disable auto-negotiation */ + value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1); + value &= ~MDIO_AN_CTRL1_ENABLE; + xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1, value); + + /* disable AN IRQ */ + xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + + /* clear AN IRQ */ + xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_AN_INT, 0); + + /* MAC software reset */ + dma_mr_value = ioread32(xgmac_regs->ioaddr + DMA_MR); + dma_mr_value |= 0x1; + iowrite32(dma_mr_value, xgmac_regs->ioaddr + DMA_MR); + + usleep_range(10, 15); + + count = 2000; + while (--count && (ioread32(xgmac_regs->ioaddr + DMA_MR) & 1)) + usleep_range(500, 600); + + if (!count) + pr_warn("%s MAC SW reset failed\n", __func__); + + return 0; +} + +module_vfio_reset_handler("amd,xgbe-seattle-v1a", vfio_platform_amdxgbe_reset); + +MODULE_VERSION("0.1"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Eric Auger <eric.auger@linaro.org>"); +MODULE_DESCRIPTION("Reset support for AMD xgbe vfio platform device"); diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c new file mode 100644 index 000000000..d45c3be71 --- /dev/null +++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2017 Broadcom + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation 
version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * This driver provides reset support for Broadcom FlexRM ring manager + * to VFIO platform. + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include "vfio_platform_private.h" + +/* FlexRM configuration */ +#define RING_REGS_SIZE 0x10000 +#define RING_VER_MAGIC 0x76303031 + +/* Per-Ring register offsets */ +#define RING_VER 0x000 +#define RING_CONTROL 0x034 +#define RING_FLUSH_DONE 0x038 + +/* Register RING_CONTROL fields */ +#define CONTROL_FLUSH_SHIFT 5 + +/* Register RING_FLUSH_DONE fields */ +#define FLUSH_DONE_MASK 0x1 + +static int vfio_platform_bcmflexrm_shutdown(void __iomem *ring) +{ + unsigned int timeout; + + /* Disable/inactivate ring */ + writel_relaxed(0x0, ring + RING_CONTROL); + + /* Set ring flush state */ + timeout = 1000; /* timeout of 1s */ + writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring + RING_CONTROL); + do { + if (readl_relaxed(ring + RING_FLUSH_DONE) & + FLUSH_DONE_MASK) + break; + mdelay(1); + } while (--timeout); + if (!timeout) + return -ETIMEDOUT; + + /* Clear ring flush state */ + timeout = 1000; /* timeout of 1s */ + writel_relaxed(0x0, ring + RING_CONTROL); + do { + if (!(readl_relaxed(ring + RING_FLUSH_DONE) & + FLUSH_DONE_MASK)) + break; + mdelay(1); + } while (--timeout); + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev) +{ + void __iomem *ring; + int rc = 0, ret = 0, ring_num = 0; + struct vfio_platform_region *reg = &vdev->regions[0]; + + /* Map FlexRM ring registers if not mapped */ + if (!reg->ioaddr) { + reg->ioaddr = ioremap_nocache(reg->addr, reg->size); + if (!reg->ioaddr) + return -ENOMEM; + } + + /* Discover and shutdown each FlexRM ring */ + for (ring = reg->ioaddr; + ring < (reg->ioaddr + reg->size); ring += RING_REGS_SIZE) { + if (readl_relaxed(ring + RING_VER) == RING_VER_MAGIC) { + rc = vfio_platform_bcmflexrm_shutdown(ring); + if (rc) { + dev_warn(vdev->device, + "FlexRM ring%d shutdown error %d\n", + ring_num, rc); + ret |= rc; + } + ring_num++; + } + } + + return ret; +} + +module_vfio_reset_handler("brcm,iproc-flexrm-mbox", + vfio_platform_bcmflexrm_reset); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>"); +MODULE_DESCRIPTION("Reset support for Broadcom FlexRM VFIO platform device"); diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c new file mode 100644 index 000000000..49e5df6e8 --- /dev/null +++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c @@ -0,0 +1,85 @@ +/* + * VFIO platform driver specialized for Calxeda xgmac reset + * reset code is inherited from calxeda xgmac native driver + * + * Copyright 2010-2011 Calxeda, Inc. + * Copyright (c) 2015 Linaro Ltd. + * www.linaro.org + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> + +#include "vfio_platform_private.h" + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "Eric Auger <eric.auger@linaro.org>" +#define DRIVER_DESC "Reset support for Calxeda xgmac vfio platform device" + +/* XGMAC Register definitions */ +#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ + +/* DMA Control and Status Registers */ +#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ +#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */ + +/* DMA Control registe defines */ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ + +/* Common MAC defines */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ + +static inline void xgmac_mac_disable(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CONTROL); + + value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); + writel(value, ioaddr + XGMAC_DMA_CONTROL); + + value = readl(ioaddr + XGMAC_CONTROL); + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); + writel(value, ioaddr + XGMAC_CONTROL); +} + +static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev) +{ + struct vfio_platform_region *reg = &vdev->regions[0]; + + if (!reg->ioaddr) { + reg->ioaddr = + ioremap_nocache(reg->addr, reg->size); + if (!reg->ioaddr) + return -ENOMEM; + } + + /* disable IRQ */ + writel(0, reg->ioaddr + XGMAC_DMA_INTR_ENA); + + /* Disable the MAC core */ + xgmac_mac_disable(reg->ioaddr); + + return 0; +} + +module_vfio_reset_handler("calxeda,hb-xgmac", vfio_platform_calxedaxgmac_reset); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c new file mode 100644 index 000000000..62dfbfeaa --- /dev/null +++ b/drivers/vfio/platform/vfio_amba.c @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2013 - Virtual Open Systems + * Author: Antonios Motakis <a.motakis@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vfio.h> +#include <linux/amba/bus.h> + +#include "vfio_platform_private.h" + +#define DRIVER_VERSION "0.10" +#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>" +#define DRIVER_DESC "VFIO for AMBA devices - User Level meta-driver" + +/* probing devices from the AMBA bus */ + +static struct resource *get_amba_resource(struct vfio_platform_device *vdev, + int i) +{ + struct amba_device *adev = (struct amba_device *) vdev->opaque; + + if (i == 0) + return &adev->res; + + return NULL; +} + +static int get_amba_irq(struct vfio_platform_device *vdev, int i) +{ + struct amba_device *adev = (struct amba_device *) vdev->opaque; + int ret = 0; + + if (i < AMBA_NR_IRQS) + ret = adev->irq[i]; + + /* zero is an unset IRQ for AMBA devices */ + return ret ? ret : -ENXIO; +} + +static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id) +{ + struct vfio_platform_device *vdev; + int ret; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) + return -ENOMEM; + + vdev->name = kasprintf(GFP_KERNEL, "vfio-amba-%08x", adev->periphid); + if (!vdev->name) { + kfree(vdev); + return -ENOMEM; + } + + vdev->opaque = (void *) adev; + vdev->flags = VFIO_DEVICE_FLAGS_AMBA; + vdev->get_resource = get_amba_resource; + vdev->get_irq = get_amba_irq; + vdev->parent_module = THIS_MODULE; + vdev->reset_required = false; + + ret = vfio_platform_probe_common(vdev, &adev->dev); + if (ret) { + kfree(vdev->name); + kfree(vdev); + } + + return ret; +} + +static int vfio_amba_remove(struct amba_device *adev) +{ + struct vfio_platform_device *vdev; + + vdev = vfio_platform_remove_common(&adev->dev); + if (vdev) { + kfree(vdev->name); + kfree(vdev); + return 0; + } + + return -EINVAL; +} + +static const struct amba_id pl330_ids[] = { + { 0, 0 }, +}; + +MODULE_DEVICE_TABLE(amba, pl330_ids); + +static struct amba_driver vfio_amba_driver = { + .probe = vfio_amba_probe, + .remove = vfio_amba_remove, + .id_table = pl330_ids, + .drv = { + .name = "vfio-amba", + .owner = THIS_MODULE, + }, +}; + +module_amba_driver(vfio_amba_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c new file mode 100644 index 000000000..6561751a1 --- /dev/null +++ b/drivers/vfio/platform/vfio_platform.c @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2013 - Virtual Open Systems + * Author: Antonios Motakis <a.motakis@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vfio.h> +#include <linux/platform_device.h> + +#include "vfio_platform_private.h" + +#define DRIVER_VERSION "0.10" +#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>" +#define DRIVER_DESC "VFIO for platform devices - User Level meta-driver" + +static bool reset_required = true; +module_param(reset_required, bool, 0444); +MODULE_PARM_DESC(reset_required, "override reset requirement (default: 1)"); + +/* probing devices from the linux platform bus */ + +static struct resource *get_platform_resource(struct vfio_platform_device *vdev, + int num) +{ + struct platform_device *dev = (struct platform_device *) vdev->opaque; + int i; + + for (i = 0; i < dev->num_resources; i++) { + struct resource *r = &dev->resource[i]; + + if (resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) { + if (!num) + return r; + + num--; + } + } + return NULL; +} + +static int get_platform_irq(struct vfio_platform_device *vdev, int i) +{ + struct platform_device *pdev = (struct platform_device *) vdev->opaque; + + return platform_get_irq(pdev, i); +} + +static int vfio_platform_probe(struct platform_device *pdev) +{ + struct vfio_platform_device *vdev; + int ret; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) + return -ENOMEM; + + vdev->opaque = (void *) pdev; + vdev->name = pdev->name; + vdev->flags = VFIO_DEVICE_FLAGS_PLATFORM; + vdev->get_resource = get_platform_resource; + vdev->get_irq = get_platform_irq; + vdev->parent_module = THIS_MODULE; + vdev->reset_required = reset_required; + + ret = vfio_platform_probe_common(vdev, &pdev->dev); + if (ret) + kfree(vdev); + + return ret; +} + +static int vfio_platform_remove(struct platform_device *pdev) +{ + struct vfio_platform_device *vdev; + + vdev = vfio_platform_remove_common(&pdev->dev); + if (vdev) { + kfree(vdev); + return 0; + } + + return -EINVAL; +} + +static struct platform_driver vfio_platform_driver = { + .probe = vfio_platform_probe, + .remove = vfio_platform_remove, + .driver = { + .name = "vfio-platform", + }, +}; + +module_platform_driver(vfio_platform_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c new file mode 100644 index 000000000..c29fc6844 --- /dev/null +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -0,0 +1,756 @@ +/* + * Copyright (C) 2013 - Virtual Open Systems + * Author: Antonios Motakis <a.motakis@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/acpi.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> + +#include "vfio_platform_private.h" + +#define DRIVER_VERSION "0.10" +#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>" +#define DRIVER_DESC "VFIO platform base module" + +#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL) + +static LIST_HEAD(reset_list); +static DEFINE_MUTEX(driver_lock); + +static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat, + struct module **module) +{ + struct vfio_platform_reset_node *iter; + vfio_platform_reset_fn_t reset_fn = NULL; + + mutex_lock(&driver_lock); + list_for_each_entry(iter, &reset_list, link) { + if (!strcmp(iter->compat, compat) && + try_module_get(iter->owner)) { + *module = iter->owner; + reset_fn = iter->of_reset; + break; + } + } + mutex_unlock(&driver_lock); + return reset_fn; +} + +static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev, + struct device *dev) +{ + struct acpi_device *adev; + + if (acpi_disabled) + return -ENOENT; + + adev = ACPI_COMPANION(dev); + if (!adev) { + pr_err("VFIO: ACPI companion device not found for %s\n", + vdev->name); + return -ENODEV; + } + +#ifdef CONFIG_ACPI + vdev->acpihid = acpi_device_hid(adev); +#endif + return WARN_ON(!vdev->acpihid) ? -EINVAL : 0; +} + +static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev, + const char **extra_dbg) +{ +#ifdef CONFIG_ACPI + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct device *dev = vdev->device; + acpi_handle handle = ACPI_HANDLE(dev); + acpi_status acpi_ret; + + acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer); + if (ACPI_FAILURE(acpi_ret)) { + if (extra_dbg) + *extra_dbg = acpi_format_exception(acpi_ret); + return -EINVAL; + } + + return 0; +#else + return -ENOENT; +#endif +} + +static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev) +{ +#ifdef CONFIG_ACPI + struct device *dev = vdev->device; + acpi_handle handle = ACPI_HANDLE(dev); + + return acpi_has_method(handle, "_RST"); +#else + return false; +#endif +} + +static bool vfio_platform_has_reset(struct vfio_platform_device *vdev) +{ + if (VFIO_PLATFORM_IS_ACPI(vdev)) + return vfio_platform_acpi_has_reset(vdev); + + return vdev->of_reset ? true : false; +} + +static int vfio_platform_get_reset(struct vfio_platform_device *vdev) +{ + if (VFIO_PLATFORM_IS_ACPI(vdev)) + return vfio_platform_acpi_has_reset(vdev) ? 0 : -ENOENT; + + vdev->of_reset = vfio_platform_lookup_reset(vdev->compat, + &vdev->reset_module); + if (!vdev->of_reset) { + request_module("vfio-reset:%s", vdev->compat); + vdev->of_reset = vfio_platform_lookup_reset(vdev->compat, + &vdev->reset_module); + } + + return vdev->of_reset ? 
0 : -ENOENT; +} + +static void vfio_platform_put_reset(struct vfio_platform_device *vdev) +{ + if (VFIO_PLATFORM_IS_ACPI(vdev)) + return; + + if (vdev->of_reset) + module_put(vdev->reset_module); +} + +static int vfio_platform_regions_init(struct vfio_platform_device *vdev) +{ + int cnt = 0, i; + + while (vdev->get_resource(vdev, cnt)) + cnt++; + + vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region), + GFP_KERNEL); + if (!vdev->regions) + return -ENOMEM; + + for (i = 0; i < cnt; i++) { + struct resource *res = + vdev->get_resource(vdev, i); + + if (!res) + goto err; + + vdev->regions[i].addr = res->start; + vdev->regions[i].size = resource_size(res); + vdev->regions[i].flags = 0; + + switch (resource_type(res)) { + case IORESOURCE_MEM: + vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO; + vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; + if (!(res->flags & IORESOURCE_READONLY)) + vdev->regions[i].flags |= + VFIO_REGION_INFO_FLAG_WRITE; + + /* + * Only regions addressed with PAGE granularity may be + * MMAPed securely. + */ + if (!(vdev->regions[i].addr & ~PAGE_MASK) && + !(vdev->regions[i].size & ~PAGE_MASK)) + vdev->regions[i].flags |= + VFIO_REGION_INFO_FLAG_MMAP; + + break; + case IORESOURCE_IO: + vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO; + break; + default: + goto err; + } + } + + vdev->num_regions = cnt; + + return 0; +err: + kfree(vdev->regions); + return -EINVAL; +} + +static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev) +{ + int i; + + for (i = 0; i < vdev->num_regions; i++) + iounmap(vdev->regions[i].ioaddr); + + vdev->num_regions = 0; + kfree(vdev->regions); +} + +static int vfio_platform_call_reset(struct vfio_platform_device *vdev, + const char **extra_dbg) +{ + if (VFIO_PLATFORM_IS_ACPI(vdev)) { + dev_info(vdev->device, "reset\n"); + return vfio_platform_acpi_call_reset(vdev, extra_dbg); + } else if (vdev->of_reset) { + dev_info(vdev->device, "reset\n"); + return vdev->of_reset(vdev); + } + + dev_warn(vdev->device, "no reset function found!\n"); + return -EINVAL; +} + +static void vfio_platform_release(void *device_data) +{ + struct vfio_platform_device *vdev = device_data; + + mutex_lock(&driver_lock); + + if (!(--vdev->refcnt)) { + const char *extra_dbg = NULL; + int ret; + + ret = vfio_platform_call_reset(vdev, &extra_dbg); + if (ret && vdev->reset_required) { + dev_warn(vdev->device, "reset driver is required and reset call failed in release (%d) %s\n", + ret, extra_dbg ? extra_dbg : ""); + WARN_ON(1); + } + pm_runtime_put(vdev->device); + vfio_platform_regions_cleanup(vdev); + vfio_platform_irq_cleanup(vdev); + } + + mutex_unlock(&driver_lock); + + module_put(vdev->parent_module); +} + +static int vfio_platform_open(void *device_data) +{ + struct vfio_platform_device *vdev = device_data; + int ret; + + if (!try_module_get(vdev->parent_module)) + return -ENODEV; + + mutex_lock(&driver_lock); + + if (!vdev->refcnt) { + const char *extra_dbg = NULL; + + ret = vfio_platform_regions_init(vdev); + if (ret) + goto err_reg; + + ret = vfio_platform_irq_init(vdev); + if (ret) + goto err_irq; + + ret = pm_runtime_get_sync(vdev->device); + if (ret < 0) + goto err_rst; + + ret = vfio_platform_call_reset(vdev, &extra_dbg); + if (ret && vdev->reset_required) { + dev_warn(vdev->device, "reset driver is required and reset call failed in open (%d) %s\n", + ret, extra_dbg ? 
extra_dbg : ""); + goto err_rst; + } + } + + vdev->refcnt++; + + mutex_unlock(&driver_lock); + return 0; + +err_rst: + pm_runtime_put(vdev->device); + vfio_platform_irq_cleanup(vdev); +err_irq: + vfio_platform_regions_cleanup(vdev); +err_reg: + mutex_unlock(&driver_lock); + module_put(vdev->parent_module); + return ret; +} + +static long vfio_platform_ioctl(void *device_data, + unsigned int cmd, unsigned long arg) +{ + struct vfio_platform_device *vdev = device_data; + unsigned long minsz; + + if (cmd == VFIO_DEVICE_GET_INFO) { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + if (vfio_platform_has_reset(vdev)) + vdev->flags |= VFIO_DEVICE_FLAGS_RESET; + info.flags = vdev->flags; + info.num_regions = vdev->num_regions; + info.num_irqs = vdev->num_irqs; + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct vfio_region_info info; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + if (info.index >= vdev->num_regions) + return -EINVAL; + + /* map offset to the physical address */ + info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index); + info.size = vdev->regions[info.index].size; + info.flags = vdev->regions[info.index].flags; + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + if (info.index >= vdev->num_irqs) + return -EINVAL; + + info.flags = vdev->irqs[info.index].flags; + info.count = vdev->irqs[info.index].count; + + return copy_to_user((void __user *)arg, &info, minsz) ? 
+ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; + u8 *data = NULL; + int ret = 0; + size_t data_size = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs, + vdev->num_irqs, &data_size); + if (ret) + return ret; + + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), + data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + + mutex_lock(&vdev->igate); + + ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index, + hdr.start, hdr.count, data); + mutex_unlock(&vdev->igate); + kfree(data); + + return ret; + + } else if (cmd == VFIO_DEVICE_RESET) { + return vfio_platform_call_reset(vdev, NULL); + } + + return -ENOTTY; +} + +static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg, + char __user *buf, size_t count, + loff_t off) +{ + unsigned int done = 0; + + if (!reg->ioaddr) { + reg->ioaddr = + ioremap_nocache(reg->addr, reg->size); + + if (!reg->ioaddr) + return -ENOMEM; + } + + while (count) { + size_t filled; + + if (count >= 4 && !(off % 4)) { + u32 val; + + val = ioread32(reg->ioaddr + off); + if (copy_to_user(buf, &val, 4)) + goto err; + + filled = 4; + } else if (count >= 2 && !(off % 2)) { + u16 val; + + val = ioread16(reg->ioaddr + off); + if (copy_to_user(buf, &val, 2)) + goto err; + + filled = 2; + } else { + u8 val; + + val = ioread8(reg->ioaddr + off); + if (copy_to_user(buf, &val, 1)) + goto err; + + filled = 1; + } + + + count -= filled; + done += filled; + off += filled; + buf += filled; + } + + return done; +err: + return -EFAULT; +} + +static ssize_t vfio_platform_read(void *device_data, char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_platform_device *vdev = device_data; + unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos); + loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK; + + if (index >= vdev->num_regions) + return -EINVAL; + + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)) + return -EINVAL; + + if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO) + return vfio_platform_read_mmio(&vdev->regions[index], + buf, count, off); + else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO) + return -EINVAL; /* not implemented */ + + return -EINVAL; +} + +static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg, + const char __user *buf, size_t count, + loff_t off) +{ + unsigned int done = 0; + + if (!reg->ioaddr) { + reg->ioaddr = + ioremap_nocache(reg->addr, reg->size); + + if (!reg->ioaddr) + return -ENOMEM; + } + + while (count) { + size_t filled; + + if (count >= 4 && !(off % 4)) { + u32 val; + + if (copy_from_user(&val, buf, 4)) + goto err; + iowrite32(val, reg->ioaddr + off); + + filled = 4; + } else if (count >= 2 && !(off % 2)) { + u16 val; + + if (copy_from_user(&val, buf, 2)) + goto err; + iowrite16(val, reg->ioaddr + off); + + filled = 2; + } else { + u8 val; + + if (copy_from_user(&val, buf, 1)) + goto err; + iowrite8(val, reg->ioaddr + off); + + filled = 1; + } + + count -= filled; + done += filled; + off += filled; + buf += filled; + } + + return done; +err: + return -EFAULT; +} + +static ssize_t vfio_platform_write(void *device_data, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_platform_device *vdev = device_data; + unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos); + loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK; + + if (index >= 
vdev->num_regions) + return -EINVAL; + + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)) + return -EINVAL; + + if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO) + return vfio_platform_write_mmio(&vdev->regions[index], + buf, count, off); + else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO) + return -EINVAL; /* not implemented */ + + return -EINVAL; +} + +static int vfio_platform_mmap_mmio(struct vfio_platform_region region, + struct vm_area_struct *vma) +{ + u64 req_len, pgoff, req_start; + + req_len = vma->vm_end - vma->vm_start; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (region.size < PAGE_SIZE || req_start + req_len > region.size) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; + + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + req_len, vma->vm_page_prot); +} + +static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma) +{ + struct vfio_platform_device *vdev = device_data; + unsigned int index; + + index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT); + + if (vma->vm_end < vma->vm_start) + return -EINVAL; + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + if (index >= vdev->num_regions) + return -EINVAL; + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + if (vma->vm_end & ~PAGE_MASK) + return -EINVAL; + + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP)) + return -EINVAL; + + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) + && (vma->vm_flags & VM_READ)) + return -EINVAL; + + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) + && (vma->vm_flags & VM_WRITE)) + return -EINVAL; + + vma->vm_private_data = vdev; + + if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO) + return vfio_platform_mmap_mmio(vdev->regions[index], vma); + + else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO) + return -EINVAL; /* not implemented */ + + return -EINVAL; +} + +static const struct vfio_device_ops vfio_platform_ops = { + .name = "vfio-platform", + .open = vfio_platform_open, + .release = vfio_platform_release, + .ioctl = vfio_platform_ioctl, + .read = vfio_platform_read, + .write = vfio_platform_write, + .mmap = vfio_platform_mmap, +}; + +static int vfio_platform_of_probe(struct vfio_platform_device *vdev, + struct device *dev) +{ + int ret; + + ret = device_property_read_string(dev, "compatible", + &vdev->compat); + if (ret) + pr_err("VFIO: Cannot retrieve compat for %s\n", vdev->name); + + return ret; +} + +/* + * There can be two kernel build combinations. One build where + * ACPI is not selected in Kconfig and another one with the ACPI Kconfig. + * + * In the first case, vfio_platform_acpi_probe will return since + * acpi_disabled is 1. DT user will not see any kind of messages from + * ACPI. + * + * In the second case, both DT and ACPI is compiled in but the system is + * booting with any of these combinations. + * + * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine + * terminates immediately without any messages. + * + * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are + * valid checks. We cannot claim that this system is DT. 
+ */ +int vfio_platform_probe_common(struct vfio_platform_device *vdev, + struct device *dev) +{ + struct iommu_group *group; + int ret; + + if (!vdev) + return -EINVAL; + + ret = vfio_platform_acpi_probe(vdev, dev); + if (ret) + ret = vfio_platform_of_probe(vdev, dev); + + if (ret) + return ret; + + vdev->device = dev; + + ret = vfio_platform_get_reset(vdev); + if (ret && vdev->reset_required) { + pr_err("VFIO: No reset function found for device %s\n", + vdev->name); + return ret; + } + + group = vfio_iommu_group_get(dev); + if (!group) { + pr_err("VFIO: No IOMMU group for device %s\n", vdev->name); + ret = -EINVAL; + goto put_reset; + } + + ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev); + if (ret) + goto put_iommu; + + mutex_init(&vdev->igate); + + pm_runtime_enable(vdev->device); + return 0; + +put_iommu: + vfio_iommu_group_put(group, dev); +put_reset: + vfio_platform_put_reset(vdev); + return ret; +} +EXPORT_SYMBOL_GPL(vfio_platform_probe_common); + +struct vfio_platform_device *vfio_platform_remove_common(struct device *dev) +{ + struct vfio_platform_device *vdev; + + vdev = vfio_del_group_dev(dev); + + if (vdev) { + pm_runtime_disable(vdev->device); + vfio_platform_put_reset(vdev); + vfio_iommu_group_put(dev->iommu_group, dev); + } + + return vdev; +} +EXPORT_SYMBOL_GPL(vfio_platform_remove_common); + +void __vfio_platform_register_reset(struct vfio_platform_reset_node *node) +{ + mutex_lock(&driver_lock); + list_add(&node->link, &reset_list); + mutex_unlock(&driver_lock); +} +EXPORT_SYMBOL_GPL(__vfio_platform_register_reset); + +void vfio_platform_unregister_reset(const char *compat, + vfio_platform_reset_fn_t fn) +{ + struct vfio_platform_reset_node *iter, *temp; + + mutex_lock(&driver_lock); + list_for_each_entry_safe(iter, temp, &reset_list, link) { + if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) { + list_del(&iter->link); + break; + } + } + + mutex_unlock(&driver_lock); + +} +EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c new file mode 100644 index 000000000..46d4750f4 --- /dev/null +++ b/drivers/vfio/platform/vfio_platform_irq.c @@ -0,0 +1,337 @@ +/* + * VFIO platform devices interrupt handling + * + * Copyright (C) 2013 - Virtual Open Systems + * Author: Antonios Motakis <a.motakis@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/eventfd.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/vfio.h> +#include <linux/irq.h> + +#include "vfio_platform_private.h" + +static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&irq_ctx->lock, flags); + + if (!irq_ctx->masked) { + disable_irq_nosync(irq_ctx->hwirq); + irq_ctx->masked = true; + } + + spin_unlock_irqrestore(&irq_ctx->lock, flags); +} + +static int vfio_platform_mask_handler(void *opaque, void *unused) +{ + struct vfio_platform_irq *irq_ctx = opaque; + + vfio_platform_mask(irq_ctx); + + return 0; +} + +static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, + void *data) +{ + if (start != 0 || count != 1) + return -EINVAL; + + if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE)) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + + if (fd >= 0) + return vfio_virqfd_enable((void *) &vdev->irqs[index], + vfio_platform_mask_handler, + NULL, NULL, + &vdev->irqs[index].mask, fd); + + vfio_virqfd_disable(&vdev->irqs[index].mask); + return 0; + } + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_platform_mask(&vdev->irqs[index]); + + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t mask = *(uint8_t *)data; + + if (mask) + vfio_platform_mask(&vdev->irqs[index]); + } + + return 0; +} + +static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&irq_ctx->lock, flags); + + if (irq_ctx->masked) { + enable_irq(irq_ctx->hwirq); + irq_ctx->masked = false; + } + + spin_unlock_irqrestore(&irq_ctx->lock, flags); +} + +static int vfio_platform_unmask_handler(void *opaque, void *unused) +{ + struct vfio_platform_irq *irq_ctx = opaque; + + vfio_platform_unmask(irq_ctx); + + return 0; +} + +static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, + void *data) +{ + if (start != 0 || count != 1) + return -EINVAL; + + if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE)) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + + if (fd >= 0) + return vfio_virqfd_enable((void *) &vdev->irqs[index], + vfio_platform_unmask_handler, + NULL, NULL, + &vdev->irqs[index].unmask, + fd); + + vfio_virqfd_disable(&vdev->irqs[index].unmask); + return 0; + } + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + vfio_platform_unmask(&vdev->irqs[index]); + + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t unmask = *(uint8_t *)data; + + if (unmask) + vfio_platform_unmask(&vdev->irqs[index]); + } + + return 0; +} + +static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) +{ + struct vfio_platform_irq *irq_ctx = dev_id; + unsigned long flags; + int ret = IRQ_NONE; + + spin_lock_irqsave(&irq_ctx->lock, flags); + + if (!irq_ctx->masked) { + ret = IRQ_HANDLED; + + /* automask maskable interrupts */ + disable_irq_nosync(irq_ctx->hwirq); + irq_ctx->masked = true; + } + + spin_unlock_irqrestore(&irq_ctx->lock, flags); + + if (ret == IRQ_HANDLED) + eventfd_signal(irq_ctx->trigger, 1); + + return ret; +} + +static irqreturn_t vfio_irq_handler(int irq, void *dev_id) +{ + struct vfio_platform_irq *irq_ctx = dev_id; + + eventfd_signal(irq_ctx->trigger, 1); + + return IRQ_HANDLED; +} + +static int vfio_set_trigger(struct vfio_platform_device *vdev, int 
index, + int fd, irq_handler_t handler) +{ + struct vfio_platform_irq *irq = &vdev->irqs[index]; + struct eventfd_ctx *trigger; + int ret; + + if (irq->trigger) { + irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN); + free_irq(irq->hwirq, irq); + kfree(irq->name); + eventfd_ctx_put(irq->trigger); + irq->trigger = NULL; + } + + if (fd < 0) /* Disable only */ + return 0; + + irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)", + irq->hwirq, vdev->name); + if (!irq->name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(irq->name); + return PTR_ERR(trigger); + } + + irq->trigger = trigger; + + irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN); + ret = request_irq(irq->hwirq, handler, 0, irq->name, irq); + if (ret) { + kfree(irq->name); + eventfd_ctx_put(trigger); + irq->trigger = NULL; + return ret; + } + + if (!irq->masked) + enable_irq(irq->hwirq); + + return 0; +} + +static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, + void *data) +{ + struct vfio_platform_irq *irq = &vdev->irqs[index]; + irq_handler_t handler; + + if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED) + handler = vfio_automasked_irq_handler; + else + handler = vfio_irq_handler; + + if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) + return vfio_set_trigger(vdev, index, -1, handler); + + if (start != 0 || count != 1) + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + + return vfio_set_trigger(vdev, index, fd, handler); + } + + if (flags & VFIO_IRQ_SET_DATA_NONE) { + handler(irq->hwirq, irq); + + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t trigger = *(uint8_t *)data; + + if (trigger) + handler(irq->hwirq, irq); + } + + return 0; +} + +int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, + uint32_t flags, unsigned index, unsigned start, + unsigned count, void *data) +{ + int (*func)(struct vfio_platform_device *vdev, unsigned index, + unsigned start, unsigned count, uint32_t flags, + void *data) = NULL; + + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + func = vfio_platform_set_irq_mask; + break; + case VFIO_IRQ_SET_ACTION_UNMASK: + func = vfio_platform_set_irq_unmask; + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = vfio_platform_set_irq_trigger; + break; + } + + if (!func) + return -ENOTTY; + + return func(vdev, index, start, count, flags, data); +} + +int vfio_platform_irq_init(struct vfio_platform_device *vdev) +{ + int cnt = 0, i; + + while (vdev->get_irq(vdev, cnt) >= 0) + cnt++; + + vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL); + if (!vdev->irqs) + return -ENOMEM; + + for (i = 0; i < cnt; i++) { + int hwirq = vdev->get_irq(vdev, i); + + if (hwirq < 0) + goto err; + + spin_lock_init(&vdev->irqs[i].lock); + + vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD; + + if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) + vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE + | VFIO_IRQ_INFO_AUTOMASKED; + + vdev->irqs[i].count = 1; + vdev->irqs[i].hwirq = hwirq; + vdev->irqs[i].masked = false; + } + + vdev->num_irqs = cnt; + + return 0; +err: + kfree(vdev->irqs); + return -EINVAL; +} + +void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) +{ + int i; + + for (i = 0; i < vdev->num_irqs; i++) + vfio_set_trigger(vdev, i, -1, NULL); + + vdev->num_irqs = 0; + kfree(vdev->irqs); +} diff --git a/drivers/vfio/platform/vfio_platform_private.h 
b/drivers/vfio/platform/vfio_platform_private.h new file mode 100644 index 000000000..85ffe5d9d --- /dev/null +++ b/drivers/vfio/platform/vfio_platform_private.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2013 - Virtual Open Systems + * Author: Antonios Motakis <a.motakis@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef VFIO_PLATFORM_PRIVATE_H +#define VFIO_PLATFORM_PRIVATE_H + +#include <linux/types.h> +#include <linux/interrupt.h> + +#define VFIO_PLATFORM_OFFSET_SHIFT 40 +#define VFIO_PLATFORM_OFFSET_MASK (((u64)(1) << VFIO_PLATFORM_OFFSET_SHIFT) - 1) + +#define VFIO_PLATFORM_OFFSET_TO_INDEX(off) \ + (off >> VFIO_PLATFORM_OFFSET_SHIFT) + +#define VFIO_PLATFORM_INDEX_TO_OFFSET(index) \ + ((u64)(index) << VFIO_PLATFORM_OFFSET_SHIFT) + +struct vfio_platform_irq { + u32 flags; + u32 count; + int hwirq; + char *name; + struct eventfd_ctx *trigger; + bool masked; + spinlock_t lock; + struct virqfd *unmask; + struct virqfd *mask; +}; + +struct vfio_platform_region { + u64 addr; + resource_size_t size; + u32 flags; + u32 type; +#define VFIO_PLATFORM_REGION_TYPE_MMIO 1 +#define VFIO_PLATFORM_REGION_TYPE_PIO 2 + void __iomem *ioaddr; +}; + +struct vfio_platform_device { + struct vfio_platform_region *regions; + u32 num_regions; + struct vfio_platform_irq *irqs; + u32 num_irqs; + int refcnt; + struct mutex igate; + struct module *parent_module; + const char *compat; + const char *acpihid; + struct module *reset_module; + struct device *device; + + /* + * These fields should be filled by the bus specific binder + */ + void *opaque; + const char *name; + uint32_t flags; + /* callbacks to discover device resources */ + struct resource* + (*get_resource)(struct vfio_platform_device *vdev, int i); + int (*get_irq)(struct vfio_platform_device *vdev, int i); + int (*of_reset)(struct vfio_platform_device *vdev); + + bool reset_required; +}; + +typedef int (*vfio_platform_reset_fn_t)(struct vfio_platform_device *vdev); + +struct vfio_platform_reset_node { + struct list_head link; + char *compat; + struct module *owner; + vfio_platform_reset_fn_t of_reset; +}; + +extern int vfio_platform_probe_common(struct vfio_platform_device *vdev, + struct device *dev); +extern struct vfio_platform_device *vfio_platform_remove_common + (struct device *dev); + +extern int vfio_platform_irq_init(struct vfio_platform_device *vdev); +extern void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev); + +extern int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, + uint32_t flags, unsigned index, + unsigned start, unsigned count, + void *data); + +extern void __vfio_platform_register_reset(struct vfio_platform_reset_node *n); +extern void vfio_platform_unregister_reset(const char *compat, + vfio_platform_reset_fn_t fn); +#define vfio_platform_register_reset(__compat, __reset) \ +static struct vfio_platform_reset_node __reset ## _node = { \ + .owner = THIS_MODULE, \ + .compat = __compat, \ + .of_reset = __reset, \ +}; \ +__vfio_platform_register_reset(&__reset ## _node) + +#define module_vfio_reset_handler(compat, reset) \ +MODULE_ALIAS("vfio-reset:" compat); \ +static int 
__init reset ## _module_init(void) \ +{ \ + vfio_platform_register_reset(compat, reset); \ + return 0; \ +}; \ +static void __exit reset ## _module_exit(void) \ +{ \ + vfio_platform_unregister_reset(compat, reset); \ +}; \ +module_init(reset ## _module_init); \ +module_exit(reset ## _module_exit) + +#endif /* VFIO_PLATFORM_PRIVATE_H */ diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c new file mode 100644 index 000000000..7a386fb30 --- /dev/null +++ b/drivers/vfio/vfio.c @@ -0,0 +1,2262 @@ +/* + * VFIO core + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/cdev.h> +#include <linux/compat.h> +#include <linux/device.h> +#include <linux/file.h> +#include <linux/anon_inodes.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/iommu.h> +#include <linux/list.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/rwsem.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/stat.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/wait.h> +#include <linux/sched/signal.h> + +#define DRIVER_VERSION "0.3" +#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" +#define DRIVER_DESC "VFIO - User Level meta-driver" + +static struct vfio { + struct class *class; + struct list_head iommu_drivers_list; + struct mutex iommu_drivers_lock; + struct list_head group_list; + struct idr group_idr; + struct mutex group_lock; + struct cdev group_cdev; + dev_t group_devt; + wait_queue_head_t release_q; +} vfio; + +struct vfio_iommu_driver { + const struct vfio_iommu_driver_ops *ops; + struct list_head vfio_next; +}; + +struct vfio_container { + struct kref kref; + struct list_head group_list; + struct rw_semaphore group_lock; + struct vfio_iommu_driver *iommu_driver; + void *iommu_data; + bool noiommu; +}; + +struct vfio_unbound_dev { + struct device *dev; + struct list_head unbound_next; +}; + +struct vfio_group { + struct kref kref; + int minor; + atomic_t container_users; + struct iommu_group *iommu_group; + struct vfio_container *container; + struct list_head device_list; + struct mutex device_lock; + struct device *dev; + struct notifier_block nb; + struct list_head vfio_next; + struct list_head container_next; + struct list_head unbound_list; + struct mutex unbound_lock; + atomic_t opened; + wait_queue_head_t container_q; + bool noiommu; + struct kvm *kvm; + struct blocking_notifier_head notifier; +}; + +struct vfio_device { + struct kref kref; + struct device *dev; + const struct vfio_device_ops *ops; + struct vfio_group *group; + struct list_head group_next; + void *device_data; +}; + +#ifdef CONFIG_VFIO_NOIOMMU +static bool noiommu __read_mostly; +module_param_named(enable_unsafe_noiommu_mode, + noiommu, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. 
If you do not know what this is for, step away. (default: false)"); +#endif + +/* + * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe + * and remove functions, any use cases other than acquiring the first + * reference for the purpose of calling vfio_add_group_dev() or removing + * that symmetric reference after vfio_del_group_dev() should use the raw + * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put() + * removes the device from the dummy group and cannot be nested. + */ +struct iommu_group *vfio_iommu_group_get(struct device *dev) +{ + struct iommu_group *group; + int __maybe_unused ret; + + group = iommu_group_get(dev); + +#ifdef CONFIG_VFIO_NOIOMMU + /* + * With noiommu enabled, an IOMMU group will be created for a device + * that doesn't already have one and doesn't have an iommu_ops on their + * bus. We set iommudata simply to be able to identify these groups + * as special use and for reclamation later. + */ + if (group || !noiommu || iommu_present(dev->bus)) + return group; + + group = iommu_group_alloc(); + if (IS_ERR(group)) + return NULL; + + iommu_group_set_name(group, "vfio-noiommu"); + iommu_group_set_iommudata(group, &noiommu, NULL); + ret = iommu_group_add_device(group, dev); + if (ret) { + iommu_group_put(group); + return NULL; + } + + /* + * Where to taint? At this point we've added an IOMMU group for a + * device that is not backed by iommu_ops, therefore any iommu_ + * callback using iommu_ops can legitimately Oops. So, while we may + * be about to give a DMA capable device to a user without IOMMU + * protection, which is clearly taint-worthy, let's go ahead and do + * it here. + */ + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); +#endif + + return group; +} +EXPORT_SYMBOL_GPL(vfio_iommu_group_get); + +void vfio_iommu_group_put(struct iommu_group *group, struct device *dev) +{ +#ifdef CONFIG_VFIO_NOIOMMU + if (iommu_group_get_iommudata(group) == &noiommu) + iommu_group_remove_device(dev); +#endif + + iommu_group_put(group); +} +EXPORT_SYMBOL_GPL(vfio_iommu_group_put); + +#ifdef CONFIG_VFIO_NOIOMMU +static void *vfio_noiommu_open(unsigned long arg) +{ + if (arg != VFIO_NOIOMMU_IOMMU) + return ERR_PTR(-EINVAL); + if (!capable(CAP_SYS_RAWIO)) + return ERR_PTR(-EPERM); + + return NULL; +} + +static void vfio_noiommu_release(void *iommu_data) +{ +} + +static long vfio_noiommu_ioctl(void *iommu_data, + unsigned int cmd, unsigned long arg) +{ + if (cmd == VFIO_CHECK_EXTENSION) + return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0; + + return -ENOTTY; +} + +static int vfio_noiommu_attach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ + return iommu_group_get_iommudata(iommu_group) == &noiommu ? 
0 : -EINVAL; +} + +static void vfio_noiommu_detach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ +} + +static const struct vfio_iommu_driver_ops vfio_noiommu_ops = { + .name = "vfio-noiommu", + .owner = THIS_MODULE, + .open = vfio_noiommu_open, + .release = vfio_noiommu_release, + .ioctl = vfio_noiommu_ioctl, + .attach_group = vfio_noiommu_attach_group, + .detach_group = vfio_noiommu_detach_group, +}; +#endif + + +/** + * IOMMU driver registration + */ +int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops) +{ + struct vfio_iommu_driver *driver, *tmp; + + driver = kzalloc(sizeof(*driver), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->ops = ops; + + mutex_lock(&vfio.iommu_drivers_lock); + + /* Check for duplicates */ + list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) { + if (tmp->ops == ops) { + mutex_unlock(&vfio.iommu_drivers_lock); + kfree(driver); + return -EINVAL; + } + } + + list_add(&driver->vfio_next, &vfio.iommu_drivers_list); + + mutex_unlock(&vfio.iommu_drivers_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(vfio_register_iommu_driver); + +void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops) +{ + struct vfio_iommu_driver *driver; + + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { + if (driver->ops == ops) { + list_del(&driver->vfio_next); + mutex_unlock(&vfio.iommu_drivers_lock); + kfree(driver); + return; + } + } + mutex_unlock(&vfio.iommu_drivers_lock); +} +EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver); + +/** + * Group minor allocation/free - both called with vfio.group_lock held + */ +static int vfio_alloc_group_minor(struct vfio_group *group) +{ + return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL); +} + +static void vfio_free_group_minor(int minor) +{ + idr_remove(&vfio.group_idr, minor); +} + +static int vfio_iommu_group_notifier(struct notifier_block *nb, + unsigned long action, void *data); +static void vfio_group_get(struct vfio_group *group); + +/** + * Container objects - containers are created when /dev/vfio/vfio is + * opened, but their lifecycle extends until the last user is done, so + * it's freed via kref. Must support container/group/device being + * closed in any order. + */ +static void vfio_container_get(struct vfio_container *container) +{ + kref_get(&container->kref); +} + +static void vfio_container_release(struct kref *kref) +{ + struct vfio_container *container; + container = container_of(kref, struct vfio_container, kref); + + kfree(container); +} + +static void vfio_container_put(struct vfio_container *container) +{ + kref_put(&container->kref, vfio_container_release); +} + +static void vfio_group_unlock_and_free(struct vfio_group *group) +{ + mutex_unlock(&vfio.group_lock); + /* + * Unregister outside of lock. A spurious callback is harmless now + * that the group is no longer in vfio.group_list. 
+ */ + iommu_group_unregister_notifier(group->iommu_group, &group->nb); + kfree(group); +} + +/** + * Group objects - create, release, get, put, search + */ +static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) +{ + struct vfio_group *group, *tmp; + struct device *dev; + int ret, minor; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + kref_init(&group->kref); + INIT_LIST_HEAD(&group->device_list); + mutex_init(&group->device_lock); + INIT_LIST_HEAD(&group->unbound_list); + mutex_init(&group->unbound_lock); + atomic_set(&group->container_users, 0); + atomic_set(&group->opened, 0); + init_waitqueue_head(&group->container_q); + group->iommu_group = iommu_group; +#ifdef CONFIG_VFIO_NOIOMMU + group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu); +#endif + BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); + + group->nb.notifier_call = vfio_iommu_group_notifier; + + /* + * blocking notifiers acquire a rwsem around registering and hold + * it around callback. Therefore, need to register outside of + * vfio.group_lock to avoid A-B/B-A contention. Our callback won't + * do anything unless it can find the group in vfio.group_list, so + * no harm in registering early. + */ + ret = iommu_group_register_notifier(iommu_group, &group->nb); + if (ret) { + kfree(group); + return ERR_PTR(ret); + } + + mutex_lock(&vfio.group_lock); + + /* Did we race creating this group? */ + list_for_each_entry(tmp, &vfio.group_list, vfio_next) { + if (tmp->iommu_group == iommu_group) { + vfio_group_get(tmp); + vfio_group_unlock_and_free(group); + return tmp; + } + } + + minor = vfio_alloc_group_minor(group); + if (minor < 0) { + vfio_group_unlock_and_free(group); + return ERR_PTR(minor); + } + + dev = device_create(vfio.class, NULL, + MKDEV(MAJOR(vfio.group_devt), minor), + group, "%s%d", group->noiommu ? 
"noiommu-" : "", + iommu_group_id(iommu_group)); + if (IS_ERR(dev)) { + vfio_free_group_minor(minor); + vfio_group_unlock_and_free(group); + return ERR_CAST(dev); + } + + group->minor = minor; + group->dev = dev; + + list_add(&group->vfio_next, &vfio.group_list); + + mutex_unlock(&vfio.group_lock); + + return group; +} + +/* called with vfio.group_lock held */ +static void vfio_group_release(struct kref *kref) +{ + struct vfio_group *group = container_of(kref, struct vfio_group, kref); + struct vfio_unbound_dev *unbound, *tmp; + struct iommu_group *iommu_group = group->iommu_group; + + WARN_ON(!list_empty(&group->device_list)); + WARN_ON(group->notifier.head); + + list_for_each_entry_safe(unbound, tmp, + &group->unbound_list, unbound_next) { + list_del(&unbound->unbound_next); + kfree(unbound); + } + + device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); + list_del(&group->vfio_next); + vfio_free_group_minor(group->minor); + vfio_group_unlock_and_free(group); + iommu_group_put(iommu_group); +} + +static void vfio_group_put(struct vfio_group *group) +{ + kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); +} + +struct vfio_group_put_work { + struct work_struct work; + struct vfio_group *group; +}; + +static void vfio_group_put_bg(struct work_struct *work) +{ + struct vfio_group_put_work *do_work; + + do_work = container_of(work, struct vfio_group_put_work, work); + + vfio_group_put(do_work->group); + kfree(do_work); +} + +static void vfio_group_schedule_put(struct vfio_group *group) +{ + struct vfio_group_put_work *do_work; + + do_work = kmalloc(sizeof(*do_work), GFP_KERNEL); + if (WARN_ON(!do_work)) + return; + + INIT_WORK(&do_work->work, vfio_group_put_bg); + do_work->group = group; + schedule_work(&do_work->work); +} + +/* Assume group_lock or group reference is held */ +static void vfio_group_get(struct vfio_group *group) +{ + kref_get(&group->kref); +} + +/* + * Not really a try as we will sleep for mutex, but we need to make + * sure the group pointer is valid under lock and get a reference. 
+ */ +static struct vfio_group *vfio_group_try_get(struct vfio_group *group) +{ + struct vfio_group *target = group; + + mutex_lock(&vfio.group_lock); + list_for_each_entry(group, &vfio.group_list, vfio_next) { + if (group == target) { + vfio_group_get(group); + mutex_unlock(&vfio.group_lock); + return group; + } + } + mutex_unlock(&vfio.group_lock); + + return NULL; +} + +static +struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group) +{ + struct vfio_group *group; + + mutex_lock(&vfio.group_lock); + list_for_each_entry(group, &vfio.group_list, vfio_next) { + if (group->iommu_group == iommu_group) { + vfio_group_get(group); + mutex_unlock(&vfio.group_lock); + return group; + } + } + mutex_unlock(&vfio.group_lock); + + return NULL; +} + +static struct vfio_group *vfio_group_get_from_minor(int minor) +{ + struct vfio_group *group; + + mutex_lock(&vfio.group_lock); + group = idr_find(&vfio.group_idr, minor); + if (!group) { + mutex_unlock(&vfio.group_lock); + return NULL; + } + vfio_group_get(group); + mutex_unlock(&vfio.group_lock); + + return group; +} + +static struct vfio_group *vfio_group_get_from_dev(struct device *dev) +{ + struct iommu_group *iommu_group; + struct vfio_group *group; + + iommu_group = iommu_group_get(dev); + if (!iommu_group) + return NULL; + + group = vfio_group_get_from_iommu(iommu_group); + iommu_group_put(iommu_group); + + return group; +} + +/** + * Device objects - create, release, get, put, search + */ +static +struct vfio_device *vfio_group_create_device(struct vfio_group *group, + struct device *dev, + const struct vfio_device_ops *ops, + void *device_data) +{ + struct vfio_device *device; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return ERR_PTR(-ENOMEM); + + kref_init(&device->kref); + device->dev = dev; + device->group = group; + device->ops = ops; + device->device_data = device_data; + dev_set_drvdata(dev, device); + + /* No need to get group_lock, caller has group reference */ + vfio_group_get(group); + + mutex_lock(&group->device_lock); + list_add(&device->group_next, &group->device_list); + mutex_unlock(&group->device_lock); + + return device; +} + +static void vfio_device_release(struct kref *kref) +{ + struct vfio_device *device = container_of(kref, + struct vfio_device, kref); + struct vfio_group *group = device->group; + + list_del(&device->group_next); + mutex_unlock(&group->device_lock); + + dev_set_drvdata(device->dev, NULL); + + kfree(device); + + /* vfio_del_group_dev may be waiting for this device */ + wake_up(&vfio.release_q); +} + +/* Device reference always implies a group reference */ +void vfio_device_put(struct vfio_device *device) +{ + struct vfio_group *group = device->group; + kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); + vfio_group_put(group); +} +EXPORT_SYMBOL_GPL(vfio_device_put); + +static void vfio_device_get(struct vfio_device *device) +{ + vfio_group_get(device->group); + kref_get(&device->kref); +} + +static struct vfio_device *vfio_group_get_device(struct vfio_group *group, + struct device *dev) +{ + struct vfio_device *device; + + mutex_lock(&group->device_lock); + list_for_each_entry(device, &group->device_list, group_next) { + if (device->dev == dev) { + vfio_device_get(device); + mutex_unlock(&group->device_lock); + return device; + } + } + mutex_unlock(&group->device_lock); + return NULL; +} + +/* + * Some drivers, like pci-stub, are only used to prevent other drivers from + * claiming a device and are therefore perfectly legitimate for a user 
owned + * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping + * of the device, but it does prevent the user from having direct access to + * the device, which is useful in some circumstances. + * + * We also assume that we can include PCI interconnect devices, ie. bridges. + * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge + * then all of the downstream devices will be part of the same IOMMU group as + * the bridge. Thus, if placing the bridge into the user owned IOVA space + * breaks anything, it only does so for user owned devices downstream. Note + * that error notification via MSI can be affected for platforms that handle + * MSI within the same IOVA space as DMA. + */ +static const char * const vfio_driver_whitelist[] = { "pci-stub" }; + +static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv) +{ + if (dev_is_pci(dev)) { + struct pci_dev *pdev = to_pci_dev(dev); + + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) + return true; + } + + return match_string(vfio_driver_whitelist, + ARRAY_SIZE(vfio_driver_whitelist), + drv->name) >= 0; +} + +/* + * A vfio group is viable for use by userspace if all devices are in + * one of the following states: + * - driver-less + * - bound to a vfio driver + * - bound to a whitelisted driver + * - a PCI interconnect device + * + * We use two methods to determine whether a device is bound to a vfio + * driver. The first is to test whether the device exists in the vfio + * group. The second is to test if the device exists on the group + * unbound_list, indicating it's in the middle of transitioning from + * a vfio driver to driver-less. + */ +static int vfio_dev_viable(struct device *dev, void *data) +{ + struct vfio_group *group = data; + struct vfio_device *device; + struct device_driver *drv = READ_ONCE(dev->driver); + struct vfio_unbound_dev *unbound; + int ret = -EINVAL; + + mutex_lock(&group->unbound_lock); + list_for_each_entry(unbound, &group->unbound_list, unbound_next) { + if (dev == unbound->dev) { + ret = 0; + break; + } + } + mutex_unlock(&group->unbound_lock); + + if (!ret || !drv || vfio_dev_whitelisted(dev, drv)) + return 0; + + device = vfio_group_get_device(group, dev); + if (device) { + vfio_device_put(device); + return 0; + } + + return ret; +} + +/** + * Async device support + */ +static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) +{ + struct vfio_device *device; + + /* Do we already know about it? We shouldn't */ + device = vfio_group_get_device(group, dev); + if (WARN_ON_ONCE(device)) { + vfio_device_put(device); + return 0; + } + + /* Nothing to do for idle groups */ + if (!atomic_read(&group->container_users)) + return 0; + + /* TODO Prevent device auto probing */ + WARN(1, "Device %s added to live group %d!\n", dev_name(dev), + iommu_group_id(group->iommu_group)); + + return 0; +} + +static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) +{ + /* We don't care what happens when the group isn't in use */ + if (!atomic_read(&group->container_users)) + return 0; + + return vfio_dev_viable(dev, group); +} + +static int vfio_iommu_group_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct vfio_group *group = container_of(nb, struct vfio_group, nb); + struct device *dev = data; + struct vfio_unbound_dev *unbound; + + /* + * Need to go through a group_lock lookup to get a reference or we + * risk racing a group being removed. Ignore spurious notifies. 
+ */ + group = vfio_group_try_get(group); + if (!group) + return NOTIFY_OK; + + switch (action) { + case IOMMU_GROUP_NOTIFY_ADD_DEVICE: + vfio_group_nb_add_dev(group, dev); + break; + case IOMMU_GROUP_NOTIFY_DEL_DEVICE: + /* + * Nothing to do here. If the device is in use, then the + * vfio sub-driver should block the remove callback until + * it is unused. If the device is unused or attached to a + * stub driver, then it should be released and we don't + * care that it will be going away. + */ + break; + case IOMMU_GROUP_NOTIFY_BIND_DRIVER: + pr_debug("%s: Device %s, group %d binding to driver\n", + __func__, dev_name(dev), + iommu_group_id(group->iommu_group)); + break; + case IOMMU_GROUP_NOTIFY_BOUND_DRIVER: + pr_debug("%s: Device %s, group %d bound to driver %s\n", + __func__, dev_name(dev), + iommu_group_id(group->iommu_group), dev->driver->name); + BUG_ON(vfio_group_nb_verify(group, dev)); + break; + case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER: + pr_debug("%s: Device %s, group %d unbinding from driver %s\n", + __func__, dev_name(dev), + iommu_group_id(group->iommu_group), dev->driver->name); + break; + case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER: + pr_debug("%s: Device %s, group %d unbound from driver\n", + __func__, dev_name(dev), + iommu_group_id(group->iommu_group)); + /* + * XXX An unbound device in a live group is ok, but we'd + * really like to avoid the above BUG_ON by preventing other + * drivers from binding to it. Once that occurs, we have to + * stop the system to maintain isolation. At a minimum, we'd + * want a toggle to disable driver auto probe for this device. + */ + + mutex_lock(&group->unbound_lock); + list_for_each_entry(unbound, + &group->unbound_list, unbound_next) { + if (dev == unbound->dev) { + list_del(&unbound->unbound_next); + kfree(unbound); + break; + } + } + mutex_unlock(&group->unbound_lock); + break; + } + + /* + * If we're the last reference to the group, the group will be + * released, which includes unregistering the iommu group notifier. + * We hold a read-lock on that notifier list, unregistering needs + * a write-lock... deadlock. Release our reference asynchronously + * to avoid that situation. + */ + vfio_group_schedule_put(group); + return NOTIFY_OK; +} + +/** + * VFIO driver API + */ +int vfio_add_group_dev(struct device *dev, + const struct vfio_device_ops *ops, void *device_data) +{ + struct iommu_group *iommu_group; + struct vfio_group *group; + struct vfio_device *device; + + iommu_group = iommu_group_get(dev); + if (!iommu_group) + return -EINVAL; + + group = vfio_group_get_from_iommu(iommu_group); + if (!group) { + group = vfio_create_group(iommu_group); + if (IS_ERR(group)) { + iommu_group_put(iommu_group); + return PTR_ERR(group); + } + } else { + /* + * A found vfio_group already holds a reference to the + * iommu_group. A created vfio_group keeps the reference. + */ + iommu_group_put(iommu_group); + } + + device = vfio_group_get_device(group, dev); + if (device) { + WARN(1, "Device %s already exists on group %d\n", + dev_name(dev), iommu_group_id(iommu_group)); + vfio_device_put(device); + vfio_group_put(group); + return -EBUSY; + } + + device = vfio_group_create_device(group, dev, ops, device_data); + if (IS_ERR(device)) { + vfio_group_put(group); + return PTR_ERR(device); + } + + /* + * Drop all but the vfio_device reference. The vfio_device holds + * a reference to the vfio_group, which holds a reference to the + * iommu_group. 
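+ *
+ * For example (sketch only; the "foo" names are hypothetical), a bus
+ * driver pairs this call with vfio_del_group_dev() in its probe and
+ * remove paths:
+ *
+ *	probe:	ret = vfio_add_group_dev(dev, &foo_vfio_ops, foo_dev);
+ *	remove:	foo_dev = vfio_del_group_dev(dev);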
+ */ + vfio_group_put(group); + + return 0; +} +EXPORT_SYMBOL_GPL(vfio_add_group_dev); + +/** + * Get a reference to the vfio_device for a device. Even if the + * caller thinks they own the device, they could be racing with a + * release call path, so we can't trust drvdata for the shortcut. + * Go the long way around, from the iommu_group to the vfio_group + * to the vfio_device. + */ +struct vfio_device *vfio_device_get_from_dev(struct device *dev) +{ + struct vfio_group *group; + struct vfio_device *device; + + group = vfio_group_get_from_dev(dev); + if (!group) + return NULL; + + device = vfio_group_get_device(group, dev); + vfio_group_put(group); + + return device; +} +EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); + +static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, + char *buf) +{ + struct vfio_device *it, *device = NULL; + + mutex_lock(&group->device_lock); + list_for_each_entry(it, &group->device_list, group_next) { + if (!strcmp(dev_name(it->dev), buf)) { + device = it; + vfio_device_get(device); + break; + } + } + mutex_unlock(&group->device_lock); + + return device; +} + +/* + * Caller must hold a reference to the vfio_device + */ +void *vfio_device_data(struct vfio_device *device) +{ + return device->device_data; +} +EXPORT_SYMBOL_GPL(vfio_device_data); + +/* + * Decrement the device reference count and wait for the device to be + * removed. Open file descriptors for the device... */ +void *vfio_del_group_dev(struct device *dev) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct vfio_device *device = dev_get_drvdata(dev); + struct vfio_group *group = device->group; + void *device_data = device->device_data; + struct vfio_unbound_dev *unbound; + unsigned int i = 0; + bool interrupted = false; + + /* + * The group exists so long as we have a device reference. Get + * a group reference and use it to scan for the device going away. + */ + vfio_group_get(group); + + /* + * When the device is removed from the group, the group suddenly + * becomes non-viable; the device has a driver (until the unbind + * completes), but it's not present in the group. This is bad news + * for any external users that need to re-acquire a group reference + * in order to match and release their existing reference. To + * solve this, we track such devices on the unbound_list to bridge + * the gap until they're fully unbound. + */ + unbound = kzalloc(sizeof(*unbound), GFP_KERNEL); + if (unbound) { + unbound->dev = dev; + mutex_lock(&group->unbound_lock); + list_add(&unbound->unbound_next, &group->unbound_list); + mutex_unlock(&group->unbound_lock); + } + WARN_ON(!unbound); + + vfio_device_put(device); + + /* + * If the device is still present in the group after the above + * 'put', then it is in use and we need to request it from the + * bus driver. The driver may in turn need to request the + * device from the user. We send the request on an arbitrary + * interval with counter to allow the driver to take escalating + * measures to release the device if it has the ability to do so. 
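+ *
+ * A bus driver's ->request() callback might, for instance, forward the
+ * request to userspace and only warn once the count grows (sketch only;
+ * the "foo" structure and its eventfd plumbing are hypothetical):
+ *
+ *	static void foo_request(void *device_data, unsigned int count)
+ *	{
+ *		struct foo_device *foo = device_data;
+ *
+ *		if (foo->req_trigger)
+ *			eventfd_signal(foo->req_trigger, 1);
+ *		else if (count > 10)
+ *			dev_warn(foo->dev, "no way to notify user of release request\n");
+ *	}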
+ */ + add_wait_queue(&vfio.release_q, &wait); + + do { + device = vfio_group_get_device(group, dev); + if (!device) + break; + + if (device->ops->request) + device->ops->request(device_data, i++); + + vfio_device_put(device); + + if (interrupted) { + wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10); + } else { + wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10); + if (signal_pending(current)) { + interrupted = true; + dev_warn(dev, + "Device is currently in use, task" + " \"%s\" (%d) " + "blocked until device is released", + current->comm, task_pid_nr(current)); + } + } + + } while (1); + + remove_wait_queue(&vfio.release_q, &wait); + /* + * In order to support multiple devices per group, devices can be + * plucked from the group while other devices in the group are still + * in use. The container persists with this group and those remaining + * devices still attached. If the user creates an isolation violation + * by binding this device to another driver while the group is still in + * use, that's their fault. However, in the case of removing the last, + * or potentially the only, device in the group there can be no other + * in-use devices in the group. The user has done their due diligence + * and we should lay no claims to those devices. In order to do that, + * we need to make sure the group is detached from the container. + * Without this stall, we're potentially racing with a user process + * that may attempt to immediately bind this device to another driver. + */ + if (list_empty(&group->device_list)) + wait_event(group->container_q, !group->container); + + vfio_group_put(group); + + return device_data; +} +EXPORT_SYMBOL_GPL(vfio_del_group_dev); + +/** + * VFIO base fd, /dev/vfio/vfio + */ +static long vfio_ioctl_check_extension(struct vfio_container *container, + unsigned long arg) +{ + struct vfio_iommu_driver *driver; + long ret = 0; + + down_read(&container->group_lock); + + driver = container->iommu_driver; + + switch (arg) { + /* No base extensions yet */ + default: + /* + * If no driver is set, poll all registered drivers for + * extensions and return the first positive result. If + * a driver is already set, further queries will be passed + * only to that driver. 
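+ *
+ * From userspace this is the usual probe before VFIO_SET_IOMMU
+ * (illustrative; "container_fd" is an open /dev/vfio/vfio descriptor):
+ *
+ *	if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) > 0)
+ *		ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);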
+ */ + if (!driver) { + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, + vfio_next) { + +#ifdef CONFIG_VFIO_NOIOMMU + if (!list_empty(&container->group_list) && + (container->noiommu != + (driver->ops == &vfio_noiommu_ops))) + continue; +#endif + + if (!try_module_get(driver->ops->owner)) + continue; + + ret = driver->ops->ioctl(NULL, + VFIO_CHECK_EXTENSION, + arg); + module_put(driver->ops->owner); + if (ret > 0) + break; + } + mutex_unlock(&vfio.iommu_drivers_lock); + } else + ret = driver->ops->ioctl(container->iommu_data, + VFIO_CHECK_EXTENSION, arg); + } + + up_read(&container->group_lock); + + return ret; +} + +/* hold write lock on container->group_lock */ +static int __vfio_container_attach_groups(struct vfio_container *container, + struct vfio_iommu_driver *driver, + void *data) +{ + struct vfio_group *group; + int ret = -ENODEV; + + list_for_each_entry(group, &container->group_list, container_next) { + ret = driver->ops->attach_group(data, group->iommu_group); + if (ret) + goto unwind; + } + + return ret; + +unwind: + list_for_each_entry_continue_reverse(group, &container->group_list, + container_next) { + driver->ops->detach_group(data, group->iommu_group); + } + + return ret; +} + +static long vfio_ioctl_set_iommu(struct vfio_container *container, + unsigned long arg) +{ + struct vfio_iommu_driver *driver; + long ret = -ENODEV; + + down_write(&container->group_lock); + + /* + * The container is designed to be an unprivileged interface while + * the group can be assigned to specific users. Therefore, only by + * adding a group to a container does the user get the privilege of + * enabling the iommu, which may allocate finite resources. There + * is no unset_iommu, but by removing all the groups from a container, + * the container is deprivileged and returns to an unset state. + */ + if (list_empty(&container->group_list) || container->iommu_driver) { + up_write(&container->group_lock); + return -EINVAL; + } + + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { + void *data; + +#ifdef CONFIG_VFIO_NOIOMMU + /* + * Only noiommu containers can use vfio-noiommu and noiommu + * containers can only use vfio-noiommu. + */ + if (container->noiommu != (driver->ops == &vfio_noiommu_ops)) + continue; +#endif + + if (!try_module_get(driver->ops->owner)) + continue; + + /* + * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION, + * so test which iommu driver reported support for this + * extension and call open on them. We also pass them the + * magic, allowing a single driver to support multiple + * interfaces if they'd like. 
+ */ + if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) { + module_put(driver->ops->owner); + continue; + } + + data = driver->ops->open(arg); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + module_put(driver->ops->owner); + continue; + } + + ret = __vfio_container_attach_groups(container, driver, data); + if (ret) { + driver->ops->release(data); + module_put(driver->ops->owner); + continue; + } + + container->iommu_driver = driver; + container->iommu_data = data; + break; + } + + mutex_unlock(&vfio.iommu_drivers_lock); + up_write(&container->group_lock); + + return ret; +} + +static long vfio_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver; + void *data; + long ret = -EINVAL; + + if (!container) + return ret; + + switch (cmd) { + case VFIO_GET_API_VERSION: + ret = VFIO_API_VERSION; + break; + case VFIO_CHECK_EXTENSION: + ret = vfio_ioctl_check_extension(container, arg); + break; + case VFIO_SET_IOMMU: + ret = vfio_ioctl_set_iommu(container, arg); + break; + default: + driver = container->iommu_driver; + data = container->iommu_data; + + if (driver) /* passthrough all unrecognized ioctls */ + ret = driver->ops->ioctl(data, cmd, arg); + } + + return ret; +} + +#ifdef CONFIG_COMPAT +static long vfio_fops_compat_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + arg = (unsigned long)compat_ptr(arg); + return vfio_fops_unl_ioctl(filep, cmd, arg); +} +#endif /* CONFIG_COMPAT */ + +static int vfio_fops_open(struct inode *inode, struct file *filep) +{ + struct vfio_container *container; + + container = kzalloc(sizeof(*container), GFP_KERNEL); + if (!container) + return -ENOMEM; + + INIT_LIST_HEAD(&container->group_list); + init_rwsem(&container->group_lock); + kref_init(&container->kref); + + filep->private_data = container; + + return 0; +} + +static int vfio_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_container *container = filep->private_data; + + filep->private_data = NULL; + + vfio_container_put(container); + + return 0; +} + +/* + * Once an iommu driver is set, we optionally pass read/write/mmap + * on to the driver, allowing management interfaces beyond ioctl. 
+ */ +static ssize_t vfio_fops_read(struct file *filep, char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver; + ssize_t ret = -EINVAL; + + driver = container->iommu_driver; + if (likely(driver && driver->ops->read)) + ret = driver->ops->read(container->iommu_data, + buf, count, ppos); + + return ret; +} + +static ssize_t vfio_fops_write(struct file *filep, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver; + ssize_t ret = -EINVAL; + + driver = container->iommu_driver; + if (likely(driver && driver->ops->write)) + ret = driver->ops->write(container->iommu_data, + buf, count, ppos); + + return ret; +} + +static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver; + int ret = -EINVAL; + + driver = container->iommu_driver; + if (likely(driver && driver->ops->mmap)) + ret = driver->ops->mmap(container->iommu_data, vma); + + return ret; +} + +static const struct file_operations vfio_fops = { + .owner = THIS_MODULE, + .open = vfio_fops_open, + .release = vfio_fops_release, + .read = vfio_fops_read, + .write = vfio_fops_write, + .unlocked_ioctl = vfio_fops_unl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vfio_fops_compat_ioctl, +#endif + .mmap = vfio_fops_mmap, +}; + +/** + * VFIO Group fd, /dev/vfio/$GROUP + */ +static void __vfio_group_unset_container(struct vfio_group *group) +{ + struct vfio_container *container = group->container; + struct vfio_iommu_driver *driver; + + down_write(&container->group_lock); + + driver = container->iommu_driver; + if (driver) + driver->ops->detach_group(container->iommu_data, + group->iommu_group); + + group->container = NULL; + wake_up(&group->container_q); + list_del(&group->container_next); + + /* Detaching the last group deprivileges a container, remove iommu */ + if (driver && list_empty(&container->group_list)) { + driver->ops->release(container->iommu_data); + module_put(driver->ops->owner); + container->iommu_driver = NULL; + container->iommu_data = NULL; + } + + up_write(&container->group_lock); + + vfio_container_put(container); +} + +/* + * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or + * if there was no container to unset. Since the ioctl is called on + * the group, we know that still exists, therefore the only valid + * transition here is 1->0. + */ +static int vfio_group_unset_container(struct vfio_group *group) +{ + int users = atomic_cmpxchg(&group->container_users, 1, 0); + + if (!users) + return -EINVAL; + if (users != 1) + return -EBUSY; + + __vfio_group_unset_container(group); + + return 0; +} + +/* + * When removing container users, anything that removes the last user + * implicitly removes the group from the container. That is, if the + * group file descriptor is closed, as well as any device file descriptors, + * the group is free. 
+ */ +static void vfio_group_try_dissolve_container(struct vfio_group *group) +{ + if (0 == atomic_dec_if_positive(&group->container_users)) + __vfio_group_unset_container(group); +} + +static int vfio_group_set_container(struct vfio_group *group, int container_fd) +{ + struct fd f; + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret = 0; + + if (atomic_read(&group->container_users)) + return -EINVAL; + + if (group->noiommu && !capable(CAP_SYS_RAWIO)) + return -EPERM; + + f = fdget(container_fd); + if (!f.file) + return -EBADF; + + /* Sanity check, is this really our fd? */ + if (f.file->f_op != &vfio_fops) { + fdput(f); + return -EINVAL; + } + + container = f.file->private_data; + WARN_ON(!container); /* fget ensures we don't race vfio_release */ + + down_write(&container->group_lock); + + /* Real groups and fake groups cannot mix */ + if (!list_empty(&container->group_list) && + container->noiommu != group->noiommu) { + ret = -EPERM; + goto unlock_out; + } + + driver = container->iommu_driver; + if (driver) { + ret = driver->ops->attach_group(container->iommu_data, + group->iommu_group); + if (ret) + goto unlock_out; + } + + group->container = container; + container->noiommu = group->noiommu; + list_add(&group->container_next, &container->group_list); + + /* Get a reference on the container and mark a user within the group */ + vfio_container_get(container); + atomic_inc(&group->container_users); + +unlock_out: + up_write(&container->group_lock); + fdput(f); + return ret; +} + +static bool vfio_group_viable(struct vfio_group *group) +{ + return (iommu_group_for_each_dev(group->iommu_group, + group, vfio_dev_viable) == 0); +} + +static int vfio_group_add_container_user(struct vfio_group *group) +{ + if (!atomic_inc_not_zero(&group->container_users)) + return -EINVAL; + + if (group->noiommu) { + atomic_dec(&group->container_users); + return -EPERM; + } + if (!group->container->iommu_driver || !vfio_group_viable(group)) { + atomic_dec(&group->container_users); + return -EINVAL; + } + + return 0; +} + +static const struct file_operations vfio_device_fops; + +static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) +{ + struct vfio_device *device; + struct file *filep; + int ret; + + if (0 == atomic_read(&group->container_users) || + !group->container->iommu_driver || !vfio_group_viable(group)) + return -EINVAL; + + if (group->noiommu && !capable(CAP_SYS_RAWIO)) + return -EPERM; + + device = vfio_device_get_from_name(group, buf); + if (!device) + return -ENODEV; + + ret = device->ops->open(device->device_data); + if (ret) { + vfio_device_put(device); + return ret; + } + + /* + * We can't use anon_inode_getfd() because we need to modify + * the f_mode flags directly to allow more than just ioctls + */ + ret = get_unused_fd_flags(O_CLOEXEC); + if (ret < 0) { + device->ops->release(device->device_data); + vfio_device_put(device); + return ret; + } + + filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, + device, O_RDWR); + if (IS_ERR(filep)) { + put_unused_fd(ret); + ret = PTR_ERR(filep); + device->ops->release(device->device_data); + vfio_device_put(device); + return ret; + } + + /* + * TODO: add an anon_inode interface to do this. + * Appears to be missing by lack of need rather than + * explicitly prevented. Now there's need. 
+ */ + filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + + atomic_inc(&group->container_users); + + fd_install(ret, filep); + + if (group->noiommu) + dev_warn(device->dev, "vfio-noiommu device opened by user " + "(%s:%d)\n", current->comm, task_pid_nr(current)); + + return ret; +} + +static long vfio_group_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_group *group = filep->private_data; + long ret = -ENOTTY; + + switch (cmd) { + case VFIO_GROUP_GET_STATUS: + { + struct vfio_group_status status; + unsigned long minsz; + + minsz = offsetofend(struct vfio_group_status, flags); + + if (copy_from_user(&status, (void __user *)arg, minsz)) + return -EFAULT; + + if (status.argsz < minsz) + return -EINVAL; + + status.flags = 0; + + if (vfio_group_viable(group)) + status.flags |= VFIO_GROUP_FLAGS_VIABLE; + + if (group->container) + status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET; + + if (copy_to_user((void __user *)arg, &status, minsz)) + return -EFAULT; + + ret = 0; + break; + } + case VFIO_GROUP_SET_CONTAINER: + { + int fd; + + if (get_user(fd, (int __user *)arg)) + return -EFAULT; + + if (fd < 0) + return -EINVAL; + + ret = vfio_group_set_container(group, fd); + break; + } + case VFIO_GROUP_UNSET_CONTAINER: + ret = vfio_group_unset_container(group); + break; + case VFIO_GROUP_GET_DEVICE_FD: + { + char *buf; + + buf = strndup_user((const char __user *)arg, PAGE_SIZE); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = vfio_group_get_device_fd(group, buf); + kfree(buf); + break; + } + } + + return ret; +} + +#ifdef CONFIG_COMPAT +static long vfio_group_fops_compat_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + arg = (unsigned long)compat_ptr(arg); + return vfio_group_fops_unl_ioctl(filep, cmd, arg); +} +#endif /* CONFIG_COMPAT */ + +static int vfio_group_fops_open(struct inode *inode, struct file *filep) +{ + struct vfio_group *group; + int opened; + + group = vfio_group_get_from_minor(iminor(inode)); + if (!group) + return -ENODEV; + + if (group->noiommu && !capable(CAP_SYS_RAWIO)) { + vfio_group_put(group); + return -EPERM; + } + + /* Do we need multiple instances of the group open? Seems not. */ + opened = atomic_cmpxchg(&group->opened, 0, 1); + if (opened) { + vfio_group_put(group); + return -EBUSY; + } + + /* Is something still in use from a previous open? 
*/ + if (group->container) { + atomic_dec(&group->opened); + vfio_group_put(group); + return -EBUSY; + } + + /* Warn if previous user didn't cleanup and re-init to drop them */ + if (WARN_ON(group->notifier.head)) + BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); + + filep->private_data = group; + + return 0; +} + +static int vfio_group_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_group *group = filep->private_data; + + filep->private_data = NULL; + + vfio_group_try_dissolve_container(group); + + atomic_dec(&group->opened); + + vfio_group_put(group); + + return 0; +} + +static const struct file_operations vfio_group_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = vfio_group_fops_unl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vfio_group_fops_compat_ioctl, +#endif + .open = vfio_group_fops_open, + .release = vfio_group_fops_release, +}; + +/** + * VFIO Device fd + */ +static int vfio_device_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_device *device = filep->private_data; + + device->ops->release(device->device_data); + + vfio_group_try_dissolve_container(device->group); + + vfio_device_put(device); + + return 0; +} + +static long vfio_device_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_device *device = filep->private_data; + + if (unlikely(!device->ops->ioctl)) + return -EINVAL; + + return device->ops->ioctl(device->device_data, cmd, arg); +} + +static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_device *device = filep->private_data; + + if (unlikely(!device->ops->read)) + return -EINVAL; + + return device->ops->read(device->device_data, buf, count, ppos); +} + +static ssize_t vfio_device_fops_write(struct file *filep, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct vfio_device *device = filep->private_data; + + if (unlikely(!device->ops->write)) + return -EINVAL; + + return device->ops->write(device->device_data, buf, count, ppos); +} + +static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct vfio_device *device = filep->private_data; + + if (unlikely(!device->ops->mmap)) + return -EINVAL; + + return device->ops->mmap(device->device_data, vma); +} + +#ifdef CONFIG_COMPAT +static long vfio_device_fops_compat_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + arg = (unsigned long)compat_ptr(arg); + return vfio_device_fops_unl_ioctl(filep, cmd, arg); +} +#endif /* CONFIG_COMPAT */ + +static const struct file_operations vfio_device_fops = { + .owner = THIS_MODULE, + .release = vfio_device_fops_release, + .read = vfio_device_fops_read, + .write = vfio_device_fops_write, + .unlocked_ioctl = vfio_device_fops_unl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vfio_device_fops_compat_ioctl, +#endif + .mmap = vfio_device_fops_mmap, +}; + +/** + * External user API, exported by symbols to be linked dynamically. + * + * The protocol includes: + * 1. do normal VFIO init operation: + * - opening a new container; + * - attaching group(s) to it; + * - setting an IOMMU driver for a container. + * When IOMMU is set for a container, all groups in it are + * considered ready to use by an external user. + * + * 2. User space passes a group fd to an external user. + * The external user calls vfio_group_get_external_user() + * to verify that: + * - the group is initialized; + * - IOMMU is set for it. 
+ * If both checks passed, vfio_group_get_external_user() + * increments the container user counter to prevent + * the VFIO group from disposal before KVM exits. + * + * 3. The external user calls vfio_external_user_iommu_id() + * to know an IOMMU ID. + * + * 4. When the external KVM finishes, it calls + * vfio_group_put_external_user() to release the VFIO group. + * This call decrements the container user counter. + */ +struct vfio_group *vfio_group_get_external_user(struct file *filep) +{ + struct vfio_group *group = filep->private_data; + int ret; + + if (filep->f_op != &vfio_group_fops) + return ERR_PTR(-EINVAL); + + ret = vfio_group_add_container_user(group); + if (ret) + return ERR_PTR(ret); + + vfio_group_get(group); + + return group; +} +EXPORT_SYMBOL_GPL(vfio_group_get_external_user); + +void vfio_group_put_external_user(struct vfio_group *group) +{ + vfio_group_try_dissolve_container(group); + vfio_group_put(group); +} +EXPORT_SYMBOL_GPL(vfio_group_put_external_user); + +bool vfio_external_group_match_file(struct vfio_group *test_group, + struct file *filep) +{ + struct vfio_group *group = filep->private_data; + + return (filep->f_op == &vfio_group_fops) && (group == test_group); +} +EXPORT_SYMBOL_GPL(vfio_external_group_match_file); + +int vfio_external_user_iommu_id(struct vfio_group *group) +{ + return iommu_group_id(group->iommu_group); +} +EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id); + +long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) +{ + return vfio_ioctl_check_extension(group->container, arg); +} +EXPORT_SYMBOL_GPL(vfio_external_check_extension); + +/** + * Sub-module support + */ +/* + * Helper for managing a buffer of info chain capabilities, allocate or + * reallocate a buffer with additional @size, filling in @id and @version + * of the capability. A pointer to the new capability is returned. + * + * NB. The chain is based at the head of the buffer, so new entries are + * added to the tail, vfio_info_cap_shift() should be called to fixup the + * next offsets prior to copying to the user buffer. 
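+ *
+ * A typical caller builds the chain, then fixes up the offsets before
+ * copying out (sketch only; "info" is the fixed-size info struct being
+ * returned and "cap" is some capability with an embedded
+ * vfio_info_cap_header, both hypothetical here):
+ *
+ *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ *
+ *	vfio_info_add_capability(&caps, &cap.header, sizeof(cap));
+ *	if (caps.size) {
+ *		info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ *		info.cap_offset = sizeof(info);
+ *		vfio_info_cap_shift(&caps, sizeof(info));
+ *		... copy_to_user() of info, then of caps.buf ...
+ *		kfree(caps.buf);
+ *	}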
+ */ +struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps, + size_t size, u16 id, u16 version) +{ + void *buf; + struct vfio_info_cap_header *header, *tmp; + + buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL); + if (!buf) { + kfree(caps->buf); + caps->size = 0; + return ERR_PTR(-ENOMEM); + } + + caps->buf = buf; + header = buf + caps->size; + + /* Eventually copied to user buffer, zero */ + memset(header, 0, size); + + header->id = id; + header->version = version; + + /* Add to the end of the capability chain */ + for (tmp = buf; tmp->next; tmp = buf + tmp->next) + ; /* nothing */ + + tmp->next = caps->size; + caps->size += size; + + return header; +} +EXPORT_SYMBOL_GPL(vfio_info_cap_add); + +void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset) +{ + struct vfio_info_cap_header *tmp; + void *buf = (void *)caps->buf; + + for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset) + tmp->next += offset; +} +EXPORT_SYMBOL(vfio_info_cap_shift); + +int vfio_info_add_capability(struct vfio_info_cap *caps, + struct vfio_info_cap_header *cap, size_t size) +{ + struct vfio_info_cap_header *header; + + header = vfio_info_cap_add(caps, size, cap->id, cap->version); + if (IS_ERR(header)) + return PTR_ERR(header); + + memcpy(header + 1, cap + 1, size - sizeof(*header)); + + return 0; +} +EXPORT_SYMBOL(vfio_info_add_capability); + +int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, + int max_irq_type, size_t *data_size) +{ + unsigned long minsz; + size_t size; + + minsz = offsetofend(struct vfio_irq_set, count); + + if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) || + (hdr->count >= (U32_MAX - hdr->start)) || + (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | + VFIO_IRQ_SET_ACTION_TYPE_MASK))) + return -EINVAL; + + if (data_size) + *data_size = 0; + + if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs) + return -EINVAL; + + switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { + case VFIO_IRQ_SET_DATA_NONE: + size = 0; + break; + case VFIO_IRQ_SET_DATA_BOOL: + size = sizeof(uint8_t); + break; + case VFIO_IRQ_SET_DATA_EVENTFD: + size = sizeof(int32_t); + break; + default: + return -EINVAL; + } + + if (size) { + if (hdr->argsz - minsz < hdr->count * size) + return -EINVAL; + + if (!data_size) + return -EINVAL; + + *data_size = hdr->count * size; + } + + return 0; +} +EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); + +/* + * Pin a set of guest PFNs and return their associated host PFNs for local + * domain only. + * @dev [in] : device + * @user_pfn [in]: array of user/guest PFNs to be pinned. + * @npage [in] : count of elements in user_pfn array. This count should not + * be greater VFIO_PIN_PAGES_MAX_ENTRIES. + * @prot [in] : protection flags + * @phys_pfn[out]: array of host PFNs + * Return error or number of pages pinned. 
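+ *
+ * Example (illustrative only): a mediated-device vendor driver pinning a
+ * single guest page whose guest-physical address "gpa" it already knows,
+ * "mdev" being its mdev_device:
+ *
+ *	unsigned long gfn = gpa >> PAGE_SHIFT, hpfn;
+ *	int ret = vfio_pin_pages(mdev_dev(mdev), &gfn, 1,
+ *				 IOMMU_READ | IOMMU_WRITE, &hpfn);
+ *	if (ret != 1)
+ *		return ret < 0 ? ret : -EFAULT;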
+ */ +int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, + int prot, unsigned long *phys_pfn) +{ + struct vfio_container *container; + struct vfio_group *group; + struct vfio_iommu_driver *driver; + int ret; + + if (!dev || !user_pfn || !phys_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + ret = vfio_group_add_container_user(group); + if (ret) + goto err_pin_pages; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->pin_pages)) + ret = driver->ops->pin_pages(container->iommu_data, user_pfn, + npage, prot, phys_pfn); + else + ret = -ENOTTY; + + vfio_group_try_dissolve_container(group); + +err_pin_pages: + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_pin_pages); + +/* + * Unpin set of host PFNs for local domain only. + * @dev [in] : device + * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest + * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + * @npage [in] : count of elements in user_pfn array. This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + * Return error or number of pages unpinned. + */ +int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) +{ + struct vfio_container *container; + struct vfio_group *group; + struct vfio_iommu_driver *driver; + int ret; + + if (!dev || !user_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + ret = vfio_group_add_container_user(group); + if (ret) + goto err_unpin_pages; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->unpin_pages)) + ret = driver->ops->unpin_pages(container->iommu_data, user_pfn, + npage); + else + ret = -ENOTTY; + + vfio_group_try_dissolve_container(group); + +err_unpin_pages: + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_unpin_pages); + +static int vfio_register_iommu_notifier(struct vfio_group *group, + unsigned long *events, + struct notifier_block *nb) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->register_notifier)) + ret = driver->ops->register_notifier(container->iommu_data, + events, nb); + else + ret = -ENOTTY; + + vfio_group_try_dissolve_container(group); + + return ret; +} + +static int vfio_unregister_iommu_notifier(struct vfio_group *group, + struct notifier_block *nb) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->unregister_notifier)) + ret = driver->ops->unregister_notifier(container->iommu_data, + nb); + else + ret = -ENOTTY; + + vfio_group_try_dissolve_container(group); + + return ret; +} + +void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) +{ + group->kvm = kvm; + blocking_notifier_call_chain(&group->notifier, + VFIO_GROUP_NOTIFY_SET_KVM, kvm); +} +EXPORT_SYMBOL_GPL(vfio_group_set_kvm); + +static int vfio_register_group_notifier(struct vfio_group *group, + unsigned long 
*events, + struct notifier_block *nb) +{ + int ret; + bool set_kvm = false; + + if (*events & VFIO_GROUP_NOTIFY_SET_KVM) + set_kvm = true; + + /* clear known events */ + *events &= ~VFIO_GROUP_NOTIFY_SET_KVM; + + /* refuse to continue if still events remaining */ + if (*events) + return -EINVAL; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + ret = blocking_notifier_chain_register(&group->notifier, nb); + + /* + * The attaching of kvm and vfio_group might already happen, so + * here we replay once upon registration. + */ + if (!ret && set_kvm && group->kvm) + blocking_notifier_call_chain(&group->notifier, + VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); + + vfio_group_try_dissolve_container(group); + + return ret; +} + +static int vfio_unregister_group_notifier(struct vfio_group *group, + struct notifier_block *nb) +{ + int ret; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + ret = blocking_notifier_chain_unregister(&group->notifier, nb); + + vfio_group_try_dissolve_container(group); + + return ret; +} + +int vfio_register_notifier(struct device *dev, enum vfio_notify_type type, + unsigned long *events, struct notifier_block *nb) +{ + struct vfio_group *group; + int ret; + + if (!dev || !nb || !events || (*events == 0)) + return -EINVAL; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + switch (type) { + case VFIO_IOMMU_NOTIFY: + ret = vfio_register_iommu_notifier(group, events, nb); + break; + case VFIO_GROUP_NOTIFY: + ret = vfio_register_group_notifier(group, events, nb); + break; + default: + ret = -EINVAL; + } + + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_register_notifier); + +int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type, + struct notifier_block *nb) +{ + struct vfio_group *group; + int ret; + + if (!dev || !nb) + return -EINVAL; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + switch (type) { + case VFIO_IOMMU_NOTIFY: + ret = vfio_unregister_iommu_notifier(group, nb); + break; + case VFIO_GROUP_NOTIFY: + ret = vfio_unregister_group_notifier(group, nb); + break; + default: + ret = -EINVAL; + } + + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_unregister_notifier); + +/** + * Module/class support + */ +static char *vfio_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); +} + +static struct miscdevice vfio_dev = { + .minor = VFIO_MINOR, + .name = "vfio", + .fops = &vfio_fops, + .nodename = "vfio/vfio", + .mode = S_IRUGO | S_IWUGO, +}; + +static int __init vfio_init(void) +{ + int ret; + + idr_init(&vfio.group_idr); + mutex_init(&vfio.group_lock); + mutex_init(&vfio.iommu_drivers_lock); + INIT_LIST_HEAD(&vfio.group_list); + INIT_LIST_HEAD(&vfio.iommu_drivers_list); + init_waitqueue_head(&vfio.release_q); + + ret = misc_register(&vfio_dev); + if (ret) { + pr_err("vfio: misc device register failed\n"); + return ret; + } + + /* /dev/vfio/$GROUP */ + vfio.class = class_create(THIS_MODULE, "vfio"); + if (IS_ERR(vfio.class)) { + ret = PTR_ERR(vfio.class); + goto err_class; + } + + vfio.class->devnode = vfio_devnode; + + ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio"); + if (ret) + goto err_alloc_chrdev; + + cdev_init(&vfio.group_cdev, &vfio_group_fops); + ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK); + if (ret) + goto err_cdev_add; + + pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); + +#ifdef CONFIG_VFIO_NOIOMMU + 
vfio_register_iommu_driver(&vfio_noiommu_ops); +#endif + return 0; + +err_cdev_add: + unregister_chrdev_region(vfio.group_devt, MINORMASK); +err_alloc_chrdev: + class_destroy(vfio.class); + vfio.class = NULL; +err_class: + misc_deregister(&vfio_dev); + return ret; +} + +static void __exit vfio_cleanup(void) +{ + WARN_ON(!list_empty(&vfio.group_list)); + +#ifdef CONFIG_VFIO_NOIOMMU + vfio_unregister_iommu_driver(&vfio_noiommu_ops); +#endif + idr_destroy(&vfio.group_idr); + cdev_del(&vfio.group_cdev); + unregister_chrdev_region(vfio.group_devt, MINORMASK); + class_destroy(vfio.class); + vfio.class = NULL; + misc_deregister(&vfio_dev); +} + +module_init(vfio_init); +module_exit(vfio_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_ALIAS_MISCDEV(VFIO_MINOR); +MODULE_ALIAS("devname:vfio/vfio"); +MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce"); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c new file mode 100644 index 000000000..ec53310f1 --- /dev/null +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -0,0 +1,1388 @@ +/* + * VFIO: IOMMU DMA mapping support for TCE on POWER + * + * Copyright (C) 2013 IBM Corp. All rights reserved. + * Author: Alexey Kardashevskiy <aik@ozlabs.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio_iommu_type1.c: + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/err.h> +#include <linux/vfio.h> +#include <linux/vmalloc.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> + +#include <asm/iommu.h> +#include <asm/tce.h> +#include <asm/mmu_context.h> + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "aik@ozlabs.ru" +#define DRIVER_DESC "VFIO IOMMU SPAPR TCE" + +static void tce_iommu_detach_group(void *iommu_data, + struct iommu_group *iommu_group); + +static long try_increment_locked_vm(struct mm_struct *mm, long npages) +{ + long ret = 0, locked, lock_limit; + + if (WARN_ON_ONCE(!mm)) + return -EPERM; + + if (!npages) + return 0; + + down_write(&mm->mmap_sem); + locked = mm->locked_vm + npages; + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) + ret = -ENOMEM; + else + mm->locked_vm += npages; + + pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, + npages << PAGE_SHIFT, + mm->locked_vm << PAGE_SHIFT, + rlimit(RLIMIT_MEMLOCK), + ret ? 
" - exceeded" : ""); + + up_write(&mm->mmap_sem); + + return ret; +} + +static void decrement_locked_vm(struct mm_struct *mm, long npages) +{ + if (!mm || !npages) + return; + + down_write(&mm->mmap_sem); + if (WARN_ON_ONCE(npages > mm->locked_vm)) + npages = mm->locked_vm; + mm->locked_vm -= npages; + pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, + npages << PAGE_SHIFT, + mm->locked_vm << PAGE_SHIFT, + rlimit(RLIMIT_MEMLOCK)); + up_write(&mm->mmap_sem); +} + +/* + * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation + * + * This code handles mapping and unmapping of user data buffers + * into DMA'ble space using the IOMMU + */ + +struct tce_iommu_group { + struct list_head next; + struct iommu_group *grp; +}; + +/* + * A container needs to remember which preregistered region it has + * referenced to do proper cleanup at the userspace process exit. + */ +struct tce_iommu_prereg { + struct list_head next; + struct mm_iommu_table_group_mem_t *mem; +}; + +/* + * The container descriptor supports only a single group per container. + * Required by the API as the container is not supplied with the IOMMU group + * at the moment of initialization. + */ +struct tce_container { + struct mutex lock; + bool enabled; + bool v2; + bool def_window_pending; + unsigned long locked_pages; + struct mm_struct *mm; + struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; + struct list_head group_list; + struct list_head prereg_list; +}; + +static long tce_iommu_mm_set(struct tce_container *container) +{ + if (container->mm) { + if (container->mm == current->mm) + return 0; + return -EPERM; + } + BUG_ON(!current->mm); + container->mm = current->mm; + atomic_inc(&container->mm->mm_count); + + return 0; +} + +static long tce_iommu_prereg_free(struct tce_container *container, + struct tce_iommu_prereg *tcemem) +{ + long ret; + + ret = mm_iommu_put(container->mm, tcemem->mem); + if (ret) + return ret; + + list_del(&tcemem->next); + kfree(tcemem); + + return 0; +} + +static long tce_iommu_unregister_pages(struct tce_container *container, + __u64 vaddr, __u64 size) +{ + struct mm_iommu_table_group_mem_t *mem; + struct tce_iommu_prereg *tcemem; + bool found = false; + + if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) + return -EINVAL; + + mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT); + if (!mem) + return -ENOENT; + + list_for_each_entry(tcemem, &container->prereg_list, next) { + if (tcemem->mem == mem) { + found = true; + break; + } + } + + if (!found) + return -ENOENT; + + return tce_iommu_prereg_free(container, tcemem); +} + +static long tce_iommu_register_pages(struct tce_container *container, + __u64 vaddr, __u64 size) +{ + long ret = 0; + struct mm_iommu_table_group_mem_t *mem = NULL; + struct tce_iommu_prereg *tcemem; + unsigned long entries = size >> PAGE_SHIFT; + + if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || + ((vaddr + size) < vaddr)) + return -EINVAL; + + mem = mm_iommu_find(container->mm, vaddr, entries); + if (mem) { + list_for_each_entry(tcemem, &container->prereg_list, next) { + if (tcemem->mem == mem) + return -EBUSY; + } + } + + ret = mm_iommu_get(container->mm, vaddr, entries, &mem); + if (ret) + return ret; + + tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL); + if (!tcemem) { + mm_iommu_put(container->mm, mem); + return -ENOMEM; + } + + tcemem->mem = mem; + list_add(&tcemem->next, &container->prereg_list); + + container->enabled = true; + + return 0; +} + +static bool tce_page_is_contained(struct page *page, unsigned page_shift) +{ + /* + * Check that the TCE 
table granularity is not bigger than the size of + * a page we just found. Otherwise the hardware can get access to + * a bigger memory chunk that it should. + */ + return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift; +} + +static inline bool tce_groups_attached(struct tce_container *container) +{ + return !list_empty(&container->group_list); +} + +static long tce_iommu_find_table(struct tce_container *container, + phys_addr_t ioba, struct iommu_table **ptbl) +{ + long i; + + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + struct iommu_table *tbl = container->tables[i]; + + if (tbl) { + unsigned long entry = ioba >> tbl->it_page_shift; + unsigned long start = tbl->it_offset; + unsigned long end = start + tbl->it_size; + + if ((start <= entry) && (entry < end)) { + *ptbl = tbl; + return i; + } + } + } + + return -1; +} + +static int tce_iommu_find_free_table(struct tce_container *container) +{ + int i; + + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + if (!container->tables[i]) + return i; + } + + return -ENOSPC; +} + +static int tce_iommu_enable(struct tce_container *container) +{ + int ret = 0; + unsigned long locked; + struct iommu_table_group *table_group; + struct tce_iommu_group *tcegrp; + + if (container->enabled) + return -EBUSY; + + /* + * When userspace pages are mapped into the IOMMU, they are effectively + * locked memory, so, theoretically, we need to update the accounting + * of locked pages on each map and unmap. For powerpc, the map unmap + * paths can be very hot, though, and the accounting would kill + * performance, especially since it would be difficult to impossible + * to handle the accounting in real mode only. + * + * To address that, rather than precisely accounting every page, we + * instead account for a worst case on locked memory when the iommu is + * enabled and disabled. The worst case upper bound on locked memory + * is the size of the whole iommu window, which is usually relatively + * small (compared to total memory sizes) on POWER hardware. + * + * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits, + * that would effectively kill the guest at random points, much better + * enforcing the limit based on the max that the guest can map. + * + * Unfortunately at the moment it counts whole tables, no matter how + * much memory the guest has. I.e. for 4GB guest and 4 IOMMU groups + * each with 2GB DMA window, 8GB will be counted here. The reason for + * this is that we cannot tell here the amount of RAM used by the guest + * as this information is only available from KVM and VFIO is + * KVM agnostic. + * + * So we do not allow enabling a container without a group attached + * as there is no way to know how much we should increment + * the locked_vm counter. 
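+ *
+ * As a worked example (values illustrative): a default 2GB 32-bit DMA
+ * window on a kernel with 64KiB pages accounts 0x80000000 >> 16 = 32768
+ * locked pages against RLIMIT_MEMLOCK when the container is enabled,
+ * no matter how much of that window the guest ever maps.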
+ */ + if (!tce_groups_attached(container)) + return -ENODEV; + + tcegrp = list_first_entry(&container->group_list, + struct tce_iommu_group, next); + table_group = iommu_group_get_iommudata(tcegrp->grp); + if (!table_group) + return -ENODEV; + + if (!table_group->tce32_size) + return -EPERM; + + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + + locked = table_group->tce32_size >> PAGE_SHIFT; + ret = try_increment_locked_vm(container->mm, locked); + if (ret) + return ret; + + container->locked_pages = locked; + + container->enabled = true; + + return ret; +} + +static void tce_iommu_disable(struct tce_container *container) +{ + if (!container->enabled) + return; + + container->enabled = false; + + BUG_ON(!container->mm); + decrement_locked_vm(container->mm, container->locked_pages); +} + +static void *tce_iommu_open(unsigned long arg) +{ + struct tce_container *container; + + if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) { + pr_err("tce_vfio: Wrong IOMMU type\n"); + return ERR_PTR(-EINVAL); + } + + container = kzalloc(sizeof(*container), GFP_KERNEL); + if (!container) + return ERR_PTR(-ENOMEM); + + mutex_init(&container->lock); + INIT_LIST_HEAD_RCU(&container->group_list); + INIT_LIST_HEAD_RCU(&container->prereg_list); + + container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; + + return container; +} + +static int tce_iommu_clear(struct tce_container *container, + struct iommu_table *tbl, + unsigned long entry, unsigned long pages); +static void tce_iommu_free_table(struct tce_container *container, + struct iommu_table *tbl); + +static void tce_iommu_release(void *iommu_data) +{ + struct tce_container *container = iommu_data; + struct tce_iommu_group *tcegrp; + struct tce_iommu_prereg *tcemem, *tmtmp; + long i; + + while (tce_groups_attached(container)) { + tcegrp = list_first_entry(&container->group_list, + struct tce_iommu_group, next); + tce_iommu_detach_group(iommu_data, tcegrp->grp); + } + + /* + * If VFIO created a table, it was not disposed + * by tce_iommu_detach_group() so do it now. 
+ */ + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + struct iommu_table *tbl = container->tables[i]; + + if (!tbl) + continue; + + tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); + tce_iommu_free_table(container, tbl); + } + + list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next) + WARN_ON(tce_iommu_prereg_free(container, tcemem)); + + tce_iommu_disable(container); + if (container->mm) + mmdrop(container->mm); + mutex_destroy(&container->lock); + + kfree(container); +} + +static void tce_iommu_unuse_page(struct tce_container *container, + unsigned long hpa) +{ + struct page *page; + + page = pfn_to_page(hpa >> PAGE_SHIFT); + put_page(page); +} + +static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, + unsigned long tce, unsigned long shift, + unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) +{ + long ret = 0; + struct mm_iommu_table_group_mem_t *mem; + + mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); + if (!mem) + return -EINVAL; + + ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa); + if (ret) + return -EINVAL; + + *pmem = mem; + + return 0; +} + +static void tce_iommu_unuse_page_v2(struct tce_container *container, + struct iommu_table *tbl, unsigned long entry) +{ + struct mm_iommu_table_group_mem_t *mem = NULL; + int ret; + unsigned long hpa = 0; + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + + if (!pua) + return; + + ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua), + tbl->it_page_shift, &hpa, &mem); + if (ret) + pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n", + __func__, be64_to_cpu(*pua), entry, ret); + if (mem) + mm_iommu_mapped_dec(mem); + + *pua = cpu_to_be64(0); +} + +static int tce_iommu_clear(struct tce_container *container, + struct iommu_table *tbl, + unsigned long entry, unsigned long pages) +{ + unsigned long oldhpa; + long ret; + enum dma_data_direction direction; + + for ( ; pages; --pages, ++entry) { + cond_resched(); + + direction = DMA_NONE; + oldhpa = 0; + ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction); + if (ret) + continue; + + if (direction == DMA_NONE) + continue; + + if (container->v2) { + tce_iommu_unuse_page_v2(container, tbl, entry); + continue; + } + + tce_iommu_unuse_page(container, oldhpa); + } + + return 0; +} + +static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa) +{ + struct page *page = NULL; + enum dma_data_direction direction = iommu_tce_direction(tce); + + if (get_user_pages_fast(tce & PAGE_MASK, 1, + direction != DMA_TO_DEVICE, &page) != 1) + return -EFAULT; + + *hpa = __pa((unsigned long) page_address(page)); + + return 0; +} + +static long tce_iommu_build(struct tce_container *container, + struct iommu_table *tbl, + unsigned long entry, unsigned long tce, unsigned long pages, + enum dma_data_direction direction) +{ + long i, ret = 0; + struct page *page; + unsigned long hpa; + enum dma_data_direction dirtmp; + + for (i = 0; i < pages; ++i) { + unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK; + + ret = tce_iommu_use_page(tce, &hpa); + if (ret) + break; + + page = pfn_to_page(hpa >> PAGE_SHIFT); + if (!tce_page_is_contained(page, tbl->it_page_shift)) { + ret = -EPERM; + break; + } + + hpa |= offset; + dirtmp = direction; + ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp); + if (ret) { + tce_iommu_unuse_page(container, hpa); + pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", + __func__, entry << tbl->it_page_shift, + tce, ret); + break; + } + + if (dirtmp != DMA_NONE) + 
tce_iommu_unuse_page(container, hpa); + + tce += IOMMU_PAGE_SIZE(tbl); + } + + if (ret) + tce_iommu_clear(container, tbl, entry, i); + + return ret; +} + +static long tce_iommu_build_v2(struct tce_container *container, + struct iommu_table *tbl, + unsigned long entry, unsigned long tce, unsigned long pages, + enum dma_data_direction direction) +{ + long i, ret = 0; + struct page *page; + unsigned long hpa; + enum dma_data_direction dirtmp; + + for (i = 0; i < pages; ++i) { + struct mm_iommu_table_group_mem_t *mem = NULL; + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i); + + ret = tce_iommu_prereg_ua_to_hpa(container, + tce, tbl->it_page_shift, &hpa, &mem); + if (ret) + break; + + page = pfn_to_page(hpa >> PAGE_SHIFT); + if (!tce_page_is_contained(page, tbl->it_page_shift)) { + ret = -EPERM; + break; + } + + /* Preserve offset within IOMMU page */ + hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK; + dirtmp = direction; + + /* The registered region is being unregistered */ + if (mm_iommu_mapped_inc(mem)) + break; + + ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp); + if (ret) { + /* dirtmp cannot be DMA_NONE here */ + tce_iommu_unuse_page_v2(container, tbl, entry + i); + pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", + __func__, entry << tbl->it_page_shift, + tce, ret); + break; + } + + if (dirtmp != DMA_NONE) + tce_iommu_unuse_page_v2(container, tbl, entry + i); + + *pua = cpu_to_be64(tce); + + tce += IOMMU_PAGE_SIZE(tbl); + } + + if (ret) + tce_iommu_clear(container, tbl, entry, i); + + return ret; +} + +static long tce_iommu_create_table(struct tce_container *container, + struct iommu_table_group *table_group, + int num, + __u32 page_shift, + __u64 window_size, + __u32 levels, + struct iommu_table **ptbl) +{ + long ret, table_size; + + table_size = table_group->ops->get_table_size(page_shift, window_size, + levels); + if (!table_size) + return -EINVAL; + + ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); + if (ret) + return ret; + + ret = table_group->ops->create_table(table_group, num, + page_shift, window_size, levels, ptbl); + + WARN_ON(!ret && !(*ptbl)->it_ops->free); + WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size)); + + return ret; +} + +static void tce_iommu_free_table(struct tce_container *container, + struct iommu_table *tbl) +{ + unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; + + iommu_tce_table_put(tbl); + decrement_locked_vm(container->mm, pages); +} + +static long tce_iommu_create_window(struct tce_container *container, + __u32 page_shift, __u64 window_size, __u32 levels, + __u64 *start_addr) +{ + struct tce_iommu_group *tcegrp; + struct iommu_table_group *table_group; + struct iommu_table *tbl = NULL; + long ret, num; + + num = tce_iommu_find_free_table(container); + if (num < 0) + return num; + + /* Get the first group for ops::create_table */ + tcegrp = list_first_entry(&container->group_list, + struct tce_iommu_group, next); + table_group = iommu_group_get_iommudata(tcegrp->grp); + if (!table_group) + return -EFAULT; + + if (!(table_group->pgsizes & (1ULL << page_shift))) + return -EINVAL; + + if (!table_group->ops->set_window || !table_group->ops->unset_window || + !table_group->ops->get_table_size || + !table_group->ops->create_table) + return -EPERM; + + /* Create TCE table */ + ret = tce_iommu_create_table(container, table_group, num, + page_shift, window_size, levels, &tbl); + if (ret) + return ret; + + BUG_ON(!tbl->it_ops->free); + + /* + * Program the table to every group. 
+ * Groups have been tested for compatibility at the attach time. + */ + list_for_each_entry(tcegrp, &container->group_list, next) { + table_group = iommu_group_get_iommudata(tcegrp->grp); + + ret = table_group->ops->set_window(table_group, num, tbl); + if (ret) + goto unset_exit; + } + + container->tables[num] = tbl; + + /* Return start address assigned by platform in create_table() */ + *start_addr = tbl->it_offset << tbl->it_page_shift; + + return 0; + +unset_exit: + list_for_each_entry(tcegrp, &container->group_list, next) { + table_group = iommu_group_get_iommudata(tcegrp->grp); + table_group->ops->unset_window(table_group, num); + } + tce_iommu_free_table(container, tbl); + + return ret; +} + +static long tce_iommu_remove_window(struct tce_container *container, + __u64 start_addr) +{ + struct iommu_table_group *table_group = NULL; + struct iommu_table *tbl; + struct tce_iommu_group *tcegrp; + int num; + + num = tce_iommu_find_table(container, start_addr, &tbl); + if (num < 0) + return -EINVAL; + + BUG_ON(!tbl->it_size); + + /* Detach groups from IOMMUs */ + list_for_each_entry(tcegrp, &container->group_list, next) { + table_group = iommu_group_get_iommudata(tcegrp->grp); + + /* + * SPAPR TCE IOMMU exposes the default DMA window to + * the guest via dma32_window_start/size of + * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow + * the userspace to remove this window, some do not so + * here we check for the platform capability. + */ + if (!table_group->ops || !table_group->ops->unset_window) + return -EPERM; + + table_group->ops->unset_window(table_group, num); + } + + /* Free table */ + tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); + tce_iommu_free_table(container, tbl); + container->tables[num] = NULL; + + return 0; +} + +static long tce_iommu_create_default_window(struct tce_container *container) +{ + long ret; + __u64 start_addr = 0; + struct tce_iommu_group *tcegrp; + struct iommu_table_group *table_group; + + if (!container->def_window_pending) + return 0; + + if (!tce_groups_attached(container)) + return -ENODEV; + + tcegrp = list_first_entry(&container->group_list, + struct tce_iommu_group, next); + table_group = iommu_group_get_iommudata(tcegrp->grp); + if (!table_group) + return -ENODEV; + + ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, + table_group->tce32_size, 1, &start_addr); + WARN_ON_ONCE(!ret && start_addr); + + if (!ret) + container->def_window_pending = false; + + return ret; +} + +static long tce_iommu_ioctl(void *iommu_data, + unsigned int cmd, unsigned long arg) +{ + struct tce_container *container = iommu_data; + unsigned long minsz, ddwsz; + long ret; + + switch (cmd) { + case VFIO_CHECK_EXTENSION: + switch (arg) { + case VFIO_SPAPR_TCE_IOMMU: + case VFIO_SPAPR_TCE_v2_IOMMU: + ret = 1; + break; + default: + ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg); + break; + } + + return (ret < 0) ? 0 : ret; + } + + /* + * Sanity check to prevent one userspace from manipulating + * another userspace mm. 
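+ * container->mm is cached by tce_iommu_mm_set() the first time an
+ * ioctl needs it, so every later caller must run in that same mm.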
+ */ + BUG_ON(!container); + if (container->mm && container->mm != current->mm) + return -EPERM; + + switch (cmd) { + case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { + struct vfio_iommu_spapr_tce_info info; + struct tce_iommu_group *tcegrp; + struct iommu_table_group *table_group; + + if (!tce_groups_attached(container)) + return -ENXIO; + + tcegrp = list_first_entry(&container->group_list, + struct tce_iommu_group, next); + table_group = iommu_group_get_iommudata(tcegrp->grp); + + if (!table_group) + return -ENXIO; + + minsz = offsetofend(struct vfio_iommu_spapr_tce_info, + dma32_window_size); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + info.dma32_window_start = table_group->tce32_start; + info.dma32_window_size = table_group->tce32_size; + info.flags = 0; + memset(&info.ddw, 0, sizeof(info.ddw)); + + if (table_group->max_dynamic_windows_supported && + container->v2) { + info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW; + info.ddw.pgsizes = table_group->pgsizes; + info.ddw.max_dynamic_windows_supported = + table_group->max_dynamic_windows_supported; + info.ddw.levels = table_group->max_levels; + } + + ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw); + + if (info.argsz >= ddwsz) + minsz = ddwsz; + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + case VFIO_IOMMU_MAP_DMA: { + struct vfio_iommu_type1_dma_map param; + struct iommu_table *tbl = NULL; + long num; + enum dma_data_direction direction; + + if (!container->enabled) + return -EPERM; + + minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); + + if (copy_from_user(¶m, (void __user *)arg, minsz)) + return -EFAULT; + + if (param.argsz < minsz) + return -EINVAL; + + if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ | + VFIO_DMA_MAP_FLAG_WRITE)) + return -EINVAL; + + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + + num = tce_iommu_find_table(container, param.iova, &tbl); + if (num < 0) + return -ENXIO; + + if ((param.size & ~IOMMU_PAGE_MASK(tbl)) || + (param.vaddr & ~IOMMU_PAGE_MASK(tbl))) + return -EINVAL; + + /* iova is checked by the IOMMU API */ + if (param.flags & VFIO_DMA_MAP_FLAG_READ) { + if (param.flags & VFIO_DMA_MAP_FLAG_WRITE) + direction = DMA_BIDIRECTIONAL; + else + direction = DMA_TO_DEVICE; + } else { + if (param.flags & VFIO_DMA_MAP_FLAG_WRITE) + direction = DMA_FROM_DEVICE; + else + return -EINVAL; + } + + ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr); + if (ret) + return ret; + + if (container->v2) + ret = tce_iommu_build_v2(container, tbl, + param.iova >> tbl->it_page_shift, + param.vaddr, + param.size >> tbl->it_page_shift, + direction); + else + ret = tce_iommu_build(container, tbl, + param.iova >> tbl->it_page_shift, + param.vaddr, + param.size >> tbl->it_page_shift, + direction); + + iommu_flush_tce(tbl); + + return ret; + } + case VFIO_IOMMU_UNMAP_DMA: { + struct vfio_iommu_type1_dma_unmap param; + struct iommu_table *tbl = NULL; + long num; + + if (!container->enabled) + return -EPERM; + + minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, + size); + + if (copy_from_user(¶m, (void __user *)arg, minsz)) + return -EFAULT; + + if (param.argsz < minsz) + return -EINVAL; + + /* No flag is supported now */ + if (param.flags) + return -EINVAL; + + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + + num = tce_iommu_find_table(container, param.iova, &tbl); + if (num < 0) + return -ENXIO; + + if (param.size & ~IOMMU_PAGE_MASK(tbl)) + 
return -EINVAL; + + ret = iommu_tce_clear_param_check(tbl, param.iova, 0, + param.size >> tbl->it_page_shift); + if (ret) + return ret; + + ret = tce_iommu_clear(container, tbl, + param.iova >> tbl->it_page_shift, + param.size >> tbl->it_page_shift); + iommu_flush_tce(tbl); + + return ret; + } + case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: { + struct vfio_iommu_spapr_register_memory param; + + if (!container->v2) + break; + + minsz = offsetofend(struct vfio_iommu_spapr_register_memory, + size); + + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + + if (copy_from_user(¶m, (void __user *)arg, minsz)) + return -EFAULT; + + if (param.argsz < minsz) + return -EINVAL; + + /* No flag is supported now */ + if (param.flags) + return -EINVAL; + + mutex_lock(&container->lock); + ret = tce_iommu_register_pages(container, param.vaddr, + param.size); + mutex_unlock(&container->lock); + + return ret; + } + case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: { + struct vfio_iommu_spapr_register_memory param; + + if (!container->v2) + break; + + if (!container->mm) + return -EPERM; + + minsz = offsetofend(struct vfio_iommu_spapr_register_memory, + size); + + if (copy_from_user(¶m, (void __user *)arg, minsz)) + return -EFAULT; + + if (param.argsz < minsz) + return -EINVAL; + + /* No flag is supported now */ + if (param.flags) + return -EINVAL; + + mutex_lock(&container->lock); + ret = tce_iommu_unregister_pages(container, param.vaddr, + param.size); + mutex_unlock(&container->lock); + + return ret; + } + case VFIO_IOMMU_ENABLE: + if (container->v2) + break; + + mutex_lock(&container->lock); + ret = tce_iommu_enable(container); + mutex_unlock(&container->lock); + return ret; + + + case VFIO_IOMMU_DISABLE: + if (container->v2) + break; + + mutex_lock(&container->lock); + tce_iommu_disable(container); + mutex_unlock(&container->lock); + return 0; + + case VFIO_EEH_PE_OP: { + struct tce_iommu_group *tcegrp; + + ret = 0; + list_for_each_entry(tcegrp, &container->group_list, next) { + ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp, + cmd, arg); + if (ret) + return ret; + } + return ret; + } + + case VFIO_IOMMU_SPAPR_TCE_CREATE: { + struct vfio_iommu_spapr_tce_create create; + + if (!container->v2) + break; + + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + + if (!tce_groups_attached(container)) + return -ENXIO; + + minsz = offsetofend(struct vfio_iommu_spapr_tce_create, + start_addr); + + if (copy_from_user(&create, (void __user *)arg, minsz)) + return -EFAULT; + + if (create.argsz < minsz) + return -EINVAL; + + if (create.flags) + return -EINVAL; + + mutex_lock(&container->lock); + + ret = tce_iommu_create_default_window(container); + if (!ret) + ret = tce_iommu_create_window(container, + create.page_shift, + create.window_size, create.levels, + &create.start_addr); + + mutex_unlock(&container->lock); + + if (!ret && copy_to_user((void __user *)arg, &create, minsz)) + ret = -EFAULT; + + return ret; + } + case VFIO_IOMMU_SPAPR_TCE_REMOVE: { + struct vfio_iommu_spapr_tce_remove remove; + + if (!container->v2) + break; + + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + + if (!tce_groups_attached(container)) + return -ENXIO; + + minsz = offsetofend(struct vfio_iommu_spapr_tce_remove, + start_addr); + + if (copy_from_user(&remove, (void __user *)arg, minsz)) + return -EFAULT; + + if (remove.argsz < minsz) + return -EINVAL; + + if (remove.flags) + return -EINVAL; + + if (container->def_window_pending && !remove.start_addr) { + container->def_window_pending = false; + return 0; + } + + 
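+ /*
+ * A still-pending default window was never programmed into the
+ * hardware, so the early return above has nothing to undo; a real
+ * window is torn down under the container lock below.
+ */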
mutex_lock(&container->lock); + + ret = tce_iommu_remove_window(container, remove.start_addr); + + mutex_unlock(&container->lock); + + return ret; + } + } + + return -ENOTTY; +} + +static void tce_iommu_release_ownership(struct tce_container *container, + struct iommu_table_group *table_group) +{ + int i; + + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + struct iommu_table *tbl = container->tables[i]; + + if (!tbl) + continue; + + tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); + if (tbl->it_map) + iommu_release_ownership(tbl); + + container->tables[i] = NULL; + } +} + +static int tce_iommu_take_ownership(struct tce_container *container, + struct iommu_table_group *table_group) +{ + int i, j, rc = 0; + + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + struct iommu_table *tbl = table_group->tables[i]; + + if (!tbl || !tbl->it_map) + continue; + + rc = iommu_take_ownership(tbl); + if (rc) { + for (j = 0; j < i; ++j) + iommu_release_ownership( + table_group->tables[j]); + + return rc; + } + } + + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) + container->tables[i] = table_group->tables[i]; + + return 0; +} + +static void tce_iommu_release_ownership_ddw(struct tce_container *container, + struct iommu_table_group *table_group) +{ + long i; + + if (!table_group->ops->unset_window) { + WARN_ON_ONCE(1); + return; + } + + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) + table_group->ops->unset_window(table_group, i); + + table_group->ops->release_ownership(table_group); +} + +static long tce_iommu_take_ownership_ddw(struct tce_container *container, + struct iommu_table_group *table_group) +{ + long i, ret = 0; + + if (!table_group->ops->create_table || !table_group->ops->set_window || + !table_group->ops->release_ownership) { + WARN_ON_ONCE(1); + return -EFAULT; + } + + table_group->ops->take_ownership(table_group); + + /* Set all windows to the new group */ + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + struct iommu_table *tbl = container->tables[i]; + + if (!tbl) + continue; + + ret = table_group->ops->set_window(table_group, i, tbl); + if (ret) + goto release_exit; + } + + return 0; + +release_exit: + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) + table_group->ops->unset_window(table_group, i); + + table_group->ops->release_ownership(table_group); + + return ret; +} + +static int tce_iommu_attach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ + int ret; + struct tce_container *container = iommu_data; + struct iommu_table_group *table_group; + struct tce_iommu_group *tcegrp = NULL; + + mutex_lock(&container->lock); + + /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", + iommu_group_id(iommu_group), iommu_group); */ + table_group = iommu_group_get_iommudata(iommu_group); + if (!table_group) { + ret = -ENODEV; + goto unlock_exit; + } + + if (tce_groups_attached(container) && (!table_group->ops || + !table_group->ops->take_ownership || + !table_group->ops->release_ownership)) { + ret = -EBUSY; + goto unlock_exit; + } + + /* Check if new group has the same iommu_ops (i.e. 
compatible) */ + list_for_each_entry(tcegrp, &container->group_list, next) { + struct iommu_table_group *table_group_tmp; + + if (tcegrp->grp == iommu_group) { + pr_warn("tce_vfio: Group %d is already attached\n", + iommu_group_id(iommu_group)); + ret = -EBUSY; + goto unlock_exit; + } + table_group_tmp = iommu_group_get_iommudata(tcegrp->grp); + if (table_group_tmp->ops->create_table != + table_group->ops->create_table) { + pr_warn("tce_vfio: Group %d is incompatible with group %d\n", + iommu_group_id(iommu_group), + iommu_group_id(tcegrp->grp)); + ret = -EPERM; + goto unlock_exit; + } + } + + tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL); + if (!tcegrp) { + ret = -ENOMEM; + goto unlock_exit; + } + + if (!table_group->ops || !table_group->ops->take_ownership || + !table_group->ops->release_ownership) { + if (container->v2) { + ret = -EPERM; + goto unlock_exit; + } + ret = tce_iommu_take_ownership(container, table_group); + } else { + if (!container->v2) { + ret = -EPERM; + goto unlock_exit; + } + ret = tce_iommu_take_ownership_ddw(container, table_group); + if (!tce_groups_attached(container) && !container->tables[0]) + container->def_window_pending = true; + } + + if (!ret) { + tcegrp->grp = iommu_group; + list_add(&tcegrp->next, &container->group_list); + } + +unlock_exit: + if (ret && tcegrp) + kfree(tcegrp); + + mutex_unlock(&container->lock); + + return ret; +} + +static void tce_iommu_detach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ + struct tce_container *container = iommu_data; + struct iommu_table_group *table_group; + bool found = false; + struct tce_iommu_group *tcegrp; + + mutex_lock(&container->lock); + + list_for_each_entry(tcegrp, &container->group_list, next) { + if (tcegrp->grp == iommu_group) { + found = true; + break; + } + } + + if (!found) { + pr_warn("tce_vfio: detaching unattached group #%u\n", + iommu_group_id(iommu_group)); + goto unlock_exit; + } + + list_del(&tcegrp->next); + kfree(tcegrp); + + table_group = iommu_group_get_iommudata(iommu_group); + BUG_ON(!table_group); + + if (!table_group->ops || !table_group->ops->release_ownership) + tce_iommu_release_ownership(container, table_group); + else + tce_iommu_release_ownership_ddw(container, table_group); + +unlock_exit: + mutex_unlock(&container->lock); +} + +const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { + .name = "iommu-vfio-powerpc", + .owner = THIS_MODULE, + .open = tce_iommu_open, + .release = tce_iommu_release, + .ioctl = tce_iommu_ioctl, + .attach_group = tce_iommu_attach_group, + .detach_group = tce_iommu_detach_group, +}; + +static int __init tce_iommu_init(void) +{ + return vfio_register_iommu_driver(&tce_iommu_driver_ops); +} + +static void __exit tce_iommu_cleanup(void) +{ + vfio_unregister_iommu_driver(&tce_iommu_driver_ops); +} + +module_init(tce_iommu_init); +module_exit(tce_iommu_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); + diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c new file mode 100644 index 000000000..95ce167a8 --- /dev/null +++ b/drivers/vfio/vfio_iommu_type1.c @@ -0,0 +1,1911 @@ +/* + * VFIO: IOMMU DMA mapping support for Type1 IOMMU + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. 
+ * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * Author: Tom Lyon, pugs@cisco.com + * + * We arbitrarily define a Type1 IOMMU as one matching the below code. + * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel + * VT-d, but that makes it harder to re-use as theoretically anyone + * implementing a similar IOMMU could make use of this. We expect the + * IOMMU to support the IOMMU API and have few to no restrictions around + * the IOVA range that can be mapped. The Type1 IOMMU is currently + * optimized for relatively static mappings of a userspace process with + * userpsace pages pinned into memory. We also assume devices and IOMMU + * domains are PCI based as the IOMMU API is still centered around a + * device/bus interface rather than a group interface. + */ + +#include <linux/compat.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/rbtree.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/workqueue.h> +#include <linux/mdev.h> +#include <linux/notifier.h> +#include <linux/dma-iommu.h> +#include <linux/irqdomain.h> + +#define DRIVER_VERSION "0.2" +#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" +#define DRIVER_DESC "Type1 IOMMU driver for VFIO" + +static bool allow_unsafe_interrupts; +module_param_named(allow_unsafe_interrupts, + allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(allow_unsafe_interrupts, + "Enable VFIO IOMMU support for on platforms without interrupt remapping support."); + +static bool disable_hugepages; +module_param_named(disable_hugepages, + disable_hugepages, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(disable_hugepages, + "Disable VFIO IOMMU support for IOMMU hugepages."); + +static unsigned int dma_entry_limit __read_mostly = U16_MAX; +module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); +MODULE_PARM_DESC(dma_entry_limit, + "Maximum number of user DMA mappings per container (65535)."); + +struct vfio_iommu { + struct list_head domain_list; + struct vfio_domain *external_domain; /* domain for external user */ + struct mutex lock; + struct rb_root dma_list; + struct blocking_notifier_head notifier; + unsigned int dma_avail; + bool v2; + bool nesting; +}; + +struct vfio_domain { + struct iommu_domain *domain; + struct list_head next; + struct list_head group_list; + int prot; /* IOMMU_CACHE */ + bool fgsp; /* Fine-grained super pages */ +}; + +struct vfio_dma { + struct rb_node node; + dma_addr_t iova; /* Device address */ + unsigned long vaddr; /* Process virtual addr */ + size_t size; /* Map size (bytes) */ + int prot; /* IOMMU_READ/WRITE */ + bool iommu_mapped; + bool lock_cap; /* capable(CAP_IPC_LOCK) */ + struct task_struct *task; + struct rb_root pfn_list; /* Ex-user pinned pfn list */ +}; + +struct vfio_group { + struct iommu_group *iommu_group; + struct list_head next; +}; + +/* + * Guest RAM pinning working set or DMA target + */ +struct vfio_pfn { + struct rb_node node; + dma_addr_t iova; /* Device address */ + unsigned long pfn; /* Host pfn */ + atomic_t ref_count; +}; + +struct 
vfio_regions { + struct list_head list; + dma_addr_t iova; + phys_addr_t phys; + size_t len; +}; + +#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \ + (!list_empty(&iommu->domain_list)) + +static int put_pfn(unsigned long pfn, int prot); + +/* + * This code handles mapping and unmapping of user data buffers + * into DMA'ble space using the IOMMU + */ + +static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, + dma_addr_t start, size_t size) +{ + struct rb_node *node = iommu->dma_list.rb_node; + + while (node) { + struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); + + if (start + size <= dma->iova) + node = node->rb_left; + else if (start >= dma->iova + dma->size) + node = node->rb_right; + else + return dma; + } + + return NULL; +} + +static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new) +{ + struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; + struct vfio_dma *dma; + + while (*link) { + parent = *link; + dma = rb_entry(parent, struct vfio_dma, node); + + if (new->iova + new->size <= dma->iova) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + rb_link_node(&new->node, parent, link); + rb_insert_color(&new->node, &iommu->dma_list); +} + +static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old) +{ + rb_erase(&old->node, &iommu->dma_list); +} + +/* + * Helper Functions for host iova-pfn list + */ +static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova) +{ + struct vfio_pfn *vpfn; + struct rb_node *node = dma->pfn_list.rb_node; + + while (node) { + vpfn = rb_entry(node, struct vfio_pfn, node); + + if (iova < vpfn->iova) + node = node->rb_left; + else if (iova > vpfn->iova) + node = node->rb_right; + else + return vpfn; + } + return NULL; +} + +static void vfio_link_pfn(struct vfio_dma *dma, + struct vfio_pfn *new) +{ + struct rb_node **link, *parent = NULL; + struct vfio_pfn *vpfn; + + link = &dma->pfn_list.rb_node; + while (*link) { + parent = *link; + vpfn = rb_entry(parent, struct vfio_pfn, node); + + if (new->iova < vpfn->iova) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + rb_link_node(&new->node, parent, link); + rb_insert_color(&new->node, &dma->pfn_list); +} + +static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old) +{ + rb_erase(&old->node, &dma->pfn_list); +} + +static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova, + unsigned long pfn) +{ + struct vfio_pfn *vpfn; + + vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL); + if (!vpfn) + return -ENOMEM; + + vpfn->iova = iova; + vpfn->pfn = pfn; + atomic_set(&vpfn->ref_count, 1); + vfio_link_pfn(dma, vpfn); + return 0; +} + +static void vfio_remove_from_pfn_list(struct vfio_dma *dma, + struct vfio_pfn *vpfn) +{ + vfio_unlink_pfn(dma, vpfn); + kfree(vpfn); +} + +static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma, + unsigned long iova) +{ + struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); + + if (vpfn) + atomic_inc(&vpfn->ref_count); + return vpfn; +} + +static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) +{ + int ret = 0; + + if (atomic_dec_and_test(&vpfn->ref_count)) { + ret = put_pfn(vpfn->pfn, dma->prot); + vfio_remove_from_pfn_list(dma, vpfn); + } + return ret; +} + +static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) +{ + struct mm_struct *mm; + int ret; + + if (!npage) + return 0; + + mm = async ? 
get_task_mm(dma->task) : dma->task->mm; + if (!mm) + return -ESRCH; /* process exited */ + + ret = down_write_killable(&mm->mmap_sem); + if (!ret) { + if (npage > 0) { + if (!dma->lock_cap) { + unsigned long limit; + + limit = task_rlimit(dma->task, + RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if (mm->locked_vm + npage > limit) + ret = -ENOMEM; + } + } + + if (!ret) + mm->locked_vm += npage; + + up_write(&mm->mmap_sem); + } + + if (async) + mmput(mm); + + return ret; +} + +/* + * Some mappings aren't backed by a struct page, for example an mmap'd + * MMIO range for our own or another device. These use a different + * pfn conversion and shouldn't be tracked as locked pages. + */ +static bool is_invalid_reserved_pfn(unsigned long pfn) +{ + if (pfn_valid(pfn)) { + bool reserved; + struct page *tail = pfn_to_page(pfn); + struct page *head = compound_head(tail); + reserved = !!(PageReserved(head)); + if (head != tail) { + /* + * "head" is not a dangling pointer + * (compound_head takes care of that) + * but the hugepage may have been split + * from under us (and we may not hold a + * reference count on the head page so it can + * be reused before we run PageReferenced), so + * we've to check PageTail before returning + * what we just read. + */ + smp_rmb(); + if (PageTail(tail)) + return reserved; + } + return PageReserved(tail); + } + + return true; +} + +static int put_pfn(unsigned long pfn, int prot) +{ + if (!is_invalid_reserved_pfn(pfn)) { + struct page *page = pfn_to_page(pfn); + if (prot & IOMMU_WRITE) + SetPageDirty(page); + put_page(page); + return 1; + } + return 0; +} + +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(NULL, mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + +static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, + int prot, unsigned long *pfn) +{ + struct page *page[1]; + struct vm_area_struct *vma; + struct vm_area_struct *vmas[1]; + unsigned int flags = 0; + int ret; + + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + down_read(&mm->mmap_sem); + if (mm == current->mm) { + ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); + } else { + ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, + vmas, NULL); + /* + * The lifetime of a vaddr_get_pfn() page pin is + * userspace-controlled. In the fs-dax case this could + * lead to indefinite stalls in filesystem operations. + * Disallow attempts to pin fs-dax pages via this + * interface. + */ + if (ret > 0 && vma_is_fsdax(vmas[0])) { + ret = -EOPNOTSUPP; + put_page(page[0]); + } + } + up_read(&mm->mmap_sem); + + if (ret == 1) { + *pfn = page_to_pfn(page[0]); + return 0; + } + + down_read(&mm->mmap_sem); + +retry: + vma = find_vma_intersection(mm, vaddr, vaddr + 1); + + if (vma && vma->vm_flags & VM_PFNMAP) { + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; + } + + up_read(&mm->mmap_sem); + return ret; +} + +/* + * Attempt to pin pages. We really don't want to track all the pfns and + * the iommu can only map chunks of consecutive pfns anyway, so get the + * first page and all consecutive pages with the same locking. 
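+ * Returns the number of contiguous pages pinned or a negative errno.
+ * Reserved pfns (e.g. VM_PFNMAP MMIO ranges) are never charged against
+ * the task's RLIMIT_MEMLOCK.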
+ */ +static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, + long npage, unsigned long *pfn_base, + unsigned long limit) +{ + unsigned long pfn = 0; + long ret, pinned = 0, lock_acct = 0; + bool rsvd; + dma_addr_t iova = vaddr - dma->vaddr + dma->iova; + + /* This code path is only user initiated */ + if (!current->mm) + return -ENODEV; + + ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); + if (ret) + return ret; + + pinned++; + rsvd = is_invalid_reserved_pfn(*pfn_base); + + /* + * Reserved pages aren't counted against the user, externally pinned + * pages are already counted against the user. + */ + if (!rsvd && !vfio_find_vpfn(dma, iova)) { + if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) { + put_pfn(*pfn_base, dma->prot); + pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, + limit << PAGE_SHIFT); + return -ENOMEM; + } + lock_acct++; + } + + if (unlikely(disable_hugepages)) + goto out; + + /* Lock all the consecutive pages from pfn_base */ + for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; + pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { + ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); + if (ret) + break; + + if (pfn != *pfn_base + pinned || + rsvd != is_invalid_reserved_pfn(pfn)) { + put_pfn(pfn, dma->prot); + break; + } + + if (!rsvd && !vfio_find_vpfn(dma, iova)) { + if (!dma->lock_cap && + current->mm->locked_vm + lock_acct + 1 > limit) { + put_pfn(pfn, dma->prot); + pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", + __func__, limit << PAGE_SHIFT); + ret = -ENOMEM; + goto unpin_out; + } + lock_acct++; + } + } + +out: + ret = vfio_lock_acct(dma, lock_acct, false); + +unpin_out: + if (ret) { + if (!rsvd) { + for (pfn = *pfn_base ; pinned ; pfn++, pinned--) + put_pfn(pfn, dma->prot); + } + + return ret; + } + + return pinned; +} + +static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, + unsigned long pfn, long npage, + bool do_accounting) +{ + long unlocked = 0, locked = 0; + long i; + + for (i = 0; i < npage; i++, iova += PAGE_SIZE) { + if (put_pfn(pfn++, dma->prot)) { + unlocked++; + if (vfio_find_vpfn(dma, iova)) + locked++; + } + } + + if (do_accounting) + vfio_lock_acct(dma, locked - unlocked, true); + + return unlocked; +} + +static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, + unsigned long *pfn_base, bool do_accounting) +{ + struct mm_struct *mm; + int ret; + + mm = get_task_mm(dma->task); + if (!mm) + return -ENODEV; + + ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); + if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { + ret = vfio_lock_acct(dma, 1, true); + if (ret) { + put_pfn(*pfn_base, dma->prot); + if (ret == -ENOMEM) + pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK " + "(%ld) exceeded\n", __func__, + dma->task->comm, task_pid_nr(dma->task), + task_rlimit(dma->task, RLIMIT_MEMLOCK)); + } + } + + mmput(mm); + return ret; +} + +static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, + bool do_accounting) +{ + int unlocked; + struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); + + if (!vpfn) + return 0; + + unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); + + if (do_accounting) + vfio_lock_acct(dma, -unlocked, true); + + return unlocked; +} + +static int vfio_iommu_type1_pin_pages(void *iommu_data, + unsigned long *user_pfn, + int npage, int prot, + unsigned long *phys_pfn) +{ + struct vfio_iommu *iommu = iommu_data; + int i, j, ret; + unsigned long remote_vaddr; + struct vfio_dma *dma; + bool do_accounting; + + if 
(!iommu || !user_pfn || !phys_pfn) + return -EINVAL; + + /* Supported for v2 version only */ + if (!iommu->v2) + return -EACCES; + + mutex_lock(&iommu->lock); + + /* Fail if notifier list is empty */ + if ((!iommu->external_domain) || (!iommu->notifier.head)) { + ret = -EINVAL; + goto pin_done; + } + + /* + * If iommu capable domain exist in the container then all pages are + * already pinned and accounted. Accouting should be done if there is no + * iommu capable domain in the container. + */ + do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu); + + for (i = 0; i < npage; i++) { + dma_addr_t iova; + struct vfio_pfn *vpfn; + + iova = user_pfn[i] << PAGE_SHIFT; + dma = vfio_find_dma(iommu, iova, PAGE_SIZE); + if (!dma) { + ret = -EINVAL; + goto pin_unwind; + } + + if ((dma->prot & prot) != prot) { + ret = -EPERM; + goto pin_unwind; + } + + vpfn = vfio_iova_get_vfio_pfn(dma, iova); + if (vpfn) { + phys_pfn[i] = vpfn->pfn; + continue; + } + + remote_vaddr = dma->vaddr + (iova - dma->iova); + ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], + do_accounting); + if (ret) + goto pin_unwind; + + ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]); + if (ret) { + if (put_pfn(phys_pfn[i], dma->prot) && do_accounting) + vfio_lock_acct(dma, -1, true); + goto pin_unwind; + } + } + + ret = i; + goto pin_done; + +pin_unwind: + phys_pfn[i] = 0; + for (j = 0; j < i; j++) { + dma_addr_t iova; + + iova = user_pfn[j] << PAGE_SHIFT; + dma = vfio_find_dma(iommu, iova, PAGE_SIZE); + vfio_unpin_page_external(dma, iova, do_accounting); + phys_pfn[j] = 0; + } +pin_done: + mutex_unlock(&iommu->lock); + return ret; +} + +static int vfio_iommu_type1_unpin_pages(void *iommu_data, + unsigned long *user_pfn, + int npage) +{ + struct vfio_iommu *iommu = iommu_data; + bool do_accounting; + int i; + + if (!iommu || !user_pfn) + return -EINVAL; + + /* Supported for v2 version only */ + if (!iommu->v2) + return -EACCES; + + mutex_lock(&iommu->lock); + + if (!iommu->external_domain) { + mutex_unlock(&iommu->lock); + return -EINVAL; + } + + do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu); + for (i = 0; i < npage; i++) { + struct vfio_dma *dma; + dma_addr_t iova; + + iova = user_pfn[i] << PAGE_SHIFT; + dma = vfio_find_dma(iommu, iova, PAGE_SIZE); + if (!dma) + goto unpin_exit; + vfio_unpin_page_external(dma, iova, do_accounting); + } + +unpin_exit: + mutex_unlock(&iommu->lock); + return i > npage ? npage : (i > 0 ? i : -EINVAL); +} + +static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain, + struct list_head *regions) +{ + long unlocked = 0; + struct vfio_regions *entry, *next; + + iommu_tlb_sync(domain->domain); + + list_for_each_entry_safe(entry, next, regions, list) { + unlocked += vfio_unpin_pages_remote(dma, + entry->iova, + entry->phys >> PAGE_SHIFT, + entry->len >> PAGE_SHIFT, + false); + list_del(&entry->list); + kfree(entry); + } + + cond_resched(); + + return unlocked; +} + +/* + * Generally, VFIO needs to unpin remote pages after each IOTLB flush. + * Therefore, when using IOTLB flush sync interface, VFIO need to keep track + * of these regions (currently using a list). + * + * This value specifies maximum number of regions for each IOTLB flush sync. 
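+ * Once the list reaches this length (or an unmap fails), vfio_sync_unpin()
+ * flushes the IOTLB and drops the accumulated page pins in one batch.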
+ */ +#define VFIO_IOMMU_TLB_SYNC_MAX 512 + +static size_t unmap_unpin_fast(struct vfio_domain *domain, + struct vfio_dma *dma, dma_addr_t *iova, + size_t len, phys_addr_t phys, long *unlocked, + struct list_head *unmapped_list, + int *unmapped_cnt) +{ + size_t unmapped = 0; + struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); + + if (entry) { + unmapped = iommu_unmap_fast(domain->domain, *iova, len); + + if (!unmapped) { + kfree(entry); + } else { + iommu_tlb_range_add(domain->domain, *iova, unmapped); + entry->iova = *iova; + entry->phys = phys; + entry->len = unmapped; + list_add_tail(&entry->list, unmapped_list); + + *iova += unmapped; + (*unmapped_cnt)++; + } + } + + /* + * Sync if the number of fast-unmap regions hits the limit + * or in case of errors. + */ + if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { + *unlocked += vfio_sync_unpin(dma, domain, + unmapped_list); + *unmapped_cnt = 0; + } + + return unmapped; +} + +static size_t unmap_unpin_slow(struct vfio_domain *domain, + struct vfio_dma *dma, dma_addr_t *iova, + size_t len, phys_addr_t phys, + long *unlocked) +{ + size_t unmapped = iommu_unmap(domain->domain, *iova, len); + + if (unmapped) { + *unlocked += vfio_unpin_pages_remote(dma, *iova, + phys >> PAGE_SHIFT, + unmapped >> PAGE_SHIFT, + false); + *iova += unmapped; + cond_resched(); + } + return unmapped; +} + +static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, + bool do_accounting) +{ + dma_addr_t iova = dma->iova, end = dma->iova + dma->size; + struct vfio_domain *domain, *d; + LIST_HEAD(unmapped_region_list); + int unmapped_region_cnt = 0; + long unlocked = 0; + + if (!dma->size) + return 0; + + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) + return 0; + + /* + * We use the IOMMU to track the physical addresses, otherwise we'd + * need a much more complicated tracking system. Unfortunately that + * means we need to use one of the iommu domains to figure out the + * pfns to unpin. The rest need to be unmapped in advance so we have + * no iommu translations remaining when the pages are unpinned. + */ + domain = d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + + list_for_each_entry_continue(d, &iommu->domain_list, next) { + iommu_unmap(d->domain, dma->iova, dma->size); + cond_resched(); + } + + while (iova < end) { + size_t unmapped, len; + phys_addr_t phys, next; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (WARN_ON(!phys)) { + iova += PAGE_SIZE; + continue; + } + + /* + * To optimize for fewer iommu_unmap() calls, each of which + * may require hardware cache flushing, try to find the + * largest contiguous physical memory chunk to unmap. + */ + for (len = PAGE_SIZE; + !domain->fgsp && iova + len < end; len += PAGE_SIZE) { + next = iommu_iova_to_phys(domain->domain, iova + len); + if (next != phys + len) + break; + } + + /* + * First, try to use fast unmap/unpin. In case of failure, + * switch to slow unmap/unpin path. 
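+ * The fast path batches IOTLB invalidations via iommu_unmap_fast() and
+ * syncs them later; the slow path uses iommu_unmap(), which flushes as
+ * it goes.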
+ */ + unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, + &unlocked, &unmapped_region_list, + &unmapped_region_cnt); + if (!unmapped) { + unmapped = unmap_unpin_slow(domain, dma, &iova, len, + phys, &unlocked); + if (WARN_ON(!unmapped)) + break; + } + } + + dma->iommu_mapped = false; + + if (unmapped_region_cnt) + unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list); + + if (do_accounting) { + vfio_lock_acct(dma, -unlocked, true); + return 0; + } + return unlocked; +} + +static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) +{ + vfio_unmap_unpin(iommu, dma, true); + vfio_unlink_dma(iommu, dma); + put_task_struct(dma->task); + kfree(dma); + iommu->dma_avail++; +} + +static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) +{ + struct vfio_domain *domain; + unsigned long bitmap = ULONG_MAX; + + mutex_lock(&iommu->lock); + list_for_each_entry(domain, &iommu->domain_list, next) + bitmap &= domain->domain->pgsize_bitmap; + mutex_unlock(&iommu->lock); + + /* + * In case the IOMMU supports page sizes smaller than PAGE_SIZE + * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes. + * That way the user will be able to map/unmap buffers whose size/ + * start address is aligned with PAGE_SIZE. Pinning code uses that + * granularity while iommu driver can use the sub-PAGE_SIZE size + * to map the buffer. + */ + if (bitmap & ~PAGE_MASK) { + bitmap &= PAGE_MASK; + bitmap |= PAGE_SIZE; + } + + return bitmap; +} + +static int vfio_dma_do_unmap(struct vfio_iommu *iommu, + struct vfio_iommu_type1_dma_unmap *unmap) +{ + uint64_t mask; + struct vfio_dma *dma, *dma_last = NULL; + size_t unmapped = 0; + int ret = 0, retries = 0; + + mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1; + + if (unmap->iova & mask) + return -EINVAL; + if (!unmap->size || unmap->size & mask) + return -EINVAL; + if (unmap->iova + unmap->size - 1 < unmap->iova || + unmap->size > SIZE_MAX) + return -EINVAL; + + WARN_ON(mask & PAGE_MASK); +again: + mutex_lock(&iommu->lock); + + /* + * vfio-iommu-type1 (v1) - User mappings were coalesced together to + * avoid tracking individual mappings. This means that the granularity + * of the original mapping was lost and the user was allowed to attempt + * to unmap any range. Depending on the contiguousness of physical + * memory and page sizes supported by the IOMMU, arbitrary unmaps may + * or may not have worked. We only guaranteed unmap granularity + * matching the original mapping; even though it was untracked here, + * the original mappings are reflected in IOMMU mappings. This + * resulted in a couple unusual behaviors. First, if a range is not + * able to be unmapped, ex. a set of 4k pages that was mapped as a + * 2M hugepage into the IOMMU, the unmap ioctl returns success but with + * a zero sized unmap. Also, if an unmap request overlaps the first + * address of a hugepage, the IOMMU will unmap the entire hugepage. + * This also returns success and the returned unmap size reflects the + * actual size unmapped. + * + * We attempt to maintain compatibility with this "v1" interface, but + * we take control out of the hands of the IOMMU. Therefore, an unmap + * request offset from the beginning of the original mapping will + * return success with zero sized unmap. And an unmap request covering + * the first iova of mapping will unmap the entire range. + * + * The v2 version of this interface intends to be more deterministic. + * Unmap requests must fully cover previous mappings. 
Multiple + * mappings may still be unmaped by specifying large ranges, but there + * must not be any previous mappings bisected by the range. An error + * will be returned if these conditions are not met. The v2 interface + * will only return success and a size of zero if there were no + * mappings within the range. + */ + if (iommu->v2) { + dma = vfio_find_dma(iommu, unmap->iova, 1); + if (dma && dma->iova != unmap->iova) { + ret = -EINVAL; + goto unlock; + } + dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0); + if (dma && dma->iova + dma->size != unmap->iova + unmap->size) { + ret = -EINVAL; + goto unlock; + } + } + + while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) { + if (!iommu->v2 && unmap->iova > dma->iova) + break; + /* + * Task with same address space who mapped this iova range is + * allowed to unmap the iova range. + */ + if (dma->task->mm != current->mm) + break; + + if (!RB_EMPTY_ROOT(&dma->pfn_list)) { + struct vfio_iommu_type1_dma_unmap nb_unmap; + + if (dma_last == dma) { + BUG_ON(++retries > 10); + } else { + dma_last = dma; + retries = 0; + } + + nb_unmap.iova = dma->iova; + nb_unmap.size = dma->size; + + /* + * Notify anyone (mdev vendor drivers) to invalidate and + * unmap iovas within the range we're about to unmap. + * Vendor drivers MUST unpin pages in response to an + * invalidation. + */ + mutex_unlock(&iommu->lock); + blocking_notifier_call_chain(&iommu->notifier, + VFIO_IOMMU_NOTIFY_DMA_UNMAP, + &nb_unmap); + goto again; + } + unmapped += dma->size; + vfio_remove_dma(iommu, dma); + } + +unlock: + mutex_unlock(&iommu->lock); + + /* Report how much was unmapped */ + unmap->size = unmapped; + + return ret; +} + +/* + * Turns out AMD IOMMU has a page table bug where it won't map large pages + * to a region that previously mapped smaller pages. This should be fixed + * soon, so this is just a temporary workaround to break mappings down into + * PAGE_SIZE. Better to map smaller pages than nothing. 
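+ * Only reached when iommu_map() of a larger chunk fails with -EBUSY;
+ * if even the PAGE_SIZE mappings fail, the partially mapped range is
+ * unmapped and the error is propagated to the caller.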
+ */ +static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova, + unsigned long pfn, long npage, int prot) +{ + long i; + int ret = 0; + + for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { + ret = iommu_map(domain->domain, iova, + (phys_addr_t)pfn << PAGE_SHIFT, + PAGE_SIZE, prot | domain->prot); + if (ret) + break; + } + + for (; i < npage && i > 0; i--, iova -= PAGE_SIZE) + iommu_unmap(domain->domain, iova, PAGE_SIZE); + + return ret; +} + +static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, + unsigned long pfn, long npage, int prot) +{ + struct vfio_domain *d; + int ret; + + list_for_each_entry(d, &iommu->domain_list, next) { + ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, + npage << PAGE_SHIFT, prot | d->prot); + if (ret) { + if (ret != -EBUSY || + map_try_harder(d, iova, pfn, npage, prot)) + goto unwind; + } + + cond_resched(); + } + + return 0; + +unwind: + list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) + iommu_unmap(d->domain, iova, npage << PAGE_SHIFT); + + return ret; +} + +static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, + size_t map_size) +{ + dma_addr_t iova = dma->iova; + unsigned long vaddr = dma->vaddr; + size_t size = map_size; + long npage; + unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + int ret = 0; + + while (size) { + /* Pin a contiguous chunk of memory */ + npage = vfio_pin_pages_remote(dma, vaddr + dma->size, + size >> PAGE_SHIFT, &pfn, limit); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; + break; + } + + /* Map it! */ + ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, + dma->prot); + if (ret) { + vfio_unpin_pages_remote(dma, iova + dma->size, pfn, + npage, true); + break; + } + + size -= npage << PAGE_SHIFT; + dma->size += npage << PAGE_SHIFT; + } + + dma->iommu_mapped = true; + + if (ret) + vfio_remove_dma(iommu, dma); + + return ret; +} + +static int vfio_dma_do_map(struct vfio_iommu *iommu, + struct vfio_iommu_type1_dma_map *map) +{ + dma_addr_t iova = map->iova; + unsigned long vaddr = map->vaddr; + size_t size = map->size; + int ret = 0, prot = 0; + uint64_t mask; + struct vfio_dma *dma; + + /* Verify that none of our __u64 fields overflow */ + if (map->size != size || map->vaddr != vaddr || map->iova != iova) + return -EINVAL; + + mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1; + + WARN_ON(mask & PAGE_MASK); + + /* READ/WRITE from device perspective */ + if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) + prot |= IOMMU_WRITE; + if (map->flags & VFIO_DMA_MAP_FLAG_READ) + prot |= IOMMU_READ; + + if (!prot || !size || (size | iova | vaddr) & mask) + return -EINVAL; + + /* Don't allow IOVA or virtual address wrap */ + if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) + return -EINVAL; + + mutex_lock(&iommu->lock); + + if (vfio_find_dma(iommu, iova, size)) { + ret = -EEXIST; + goto out_unlock; + } + + if (!iommu->dma_avail) { + ret = -ENOSPC; + goto out_unlock; + } + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) { + ret = -ENOMEM; + goto out_unlock; + } + + iommu->dma_avail--; + dma->iova = iova; + dma->vaddr = vaddr; + dma->prot = prot; + + /* + * We need to be able to both add to a task's locked memory and test + * against the locked memory limit and we need to be able to do both + * outside of this call path as pinning can be asynchronous via the + * external interfaces for mdev devices. 
RLIMIT_MEMLOCK requires a + * task_struct and VM locked pages requires an mm_struct, however + * holding an indefinite mm reference is not recommended, therefore we + * only hold a reference to a task. We could hold a reference to + * current, however QEMU uses this call path through vCPU threads, + * which can be killed resulting in a NULL mm and failure in the unmap + * path when called via a different thread. Avoid this problem by + * using the group_leader as threads within the same group require + * both CLONE_THREAD and CLONE_VM and will therefore use the same + * mm_struct. + * + * Previously we also used the task for testing CAP_IPC_LOCK at the + * time of pinning and accounting, however has_capability() makes use + * of real_cred, a copy-on-write field, so we can't guarantee that it + * matches group_leader, or in fact that it might not change by the + * time it's evaluated. If a process were to call MAP_DMA with + * CAP_IPC_LOCK but later drop it, it doesn't make sense that they + * possibly see different results for an iommu_mapped vfio_dma vs + * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the + * time of calling MAP_DMA. + */ + get_task_struct(current->group_leader); + dma->task = current->group_leader; + dma->lock_cap = capable(CAP_IPC_LOCK); + + dma->pfn_list = RB_ROOT; + + /* Insert zero-sized and grow as we map chunks of it */ + vfio_link_dma(iommu, dma); + + /* Don't pin and map if container doesn't contain IOMMU capable domain*/ + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) + dma->size = size; + else + ret = vfio_pin_map_dma(iommu, dma, size); + +out_unlock: + mutex_unlock(&iommu->lock); + return ret; +} + +static int vfio_bus_type(struct device *dev, void *data) +{ + struct bus_type **bus = data; + + if (*bus && *bus != dev->bus) + return -EINVAL; + + *bus = dev->bus; + + return 0; +} + +static int vfio_iommu_replay(struct vfio_iommu *iommu, + struct vfio_domain *domain) +{ + struct vfio_domain *d = NULL; + struct rb_node *n; + unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + int ret; + + /* Arbitrarily pick the first domain in the list for lookups */ + if (!list_empty(&iommu->domain_list)) + d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + + n = rb_first(&iommu->dma_list); + + for (; n; n = rb_next(n)) { + struct vfio_dma *dma; + dma_addr_t iova; + + dma = rb_entry(n, struct vfio_dma, node); + iova = dma->iova; + + while (iova < dma->iova + dma->size) { + phys_addr_t phys; + size_t size; + + if (dma->iommu_mapped) { + phys_addr_t p; + dma_addr_t i; + + if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ + ret = -EINVAL; + goto unwind; + } + + phys = iommu_iova_to_phys(d->domain, iova); + + if (WARN_ON(!phys)) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(d->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + } else { + unsigned long pfn; + unsigned long vaddr = dma->vaddr + + (iova - dma->iova); + size_t n = dma->iova + dma->size - iova; + long npage; + + npage = vfio_pin_pages_remote(dma, vaddr, + n >> PAGE_SHIFT, + &pfn, limit); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; + goto unwind; + } + + phys = pfn << PAGE_SHIFT; + size = npage << PAGE_SHIFT; + } + + ret = iommu_map(domain->domain, iova, phys, + size, dma->prot | domain->prot); + if (ret) { + if (!dma->iommu_mapped) + vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, + true); + goto unwind; + } + + iova += size; + } + } + + /* All dmas are now mapped, defer to second tree walk for unwind */ + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + + dma->iommu_mapped = true; + } + + return 0; + +unwind: + for (; n; n = rb_prev(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma_addr_t iova; + + if (dma->iommu_mapped) { + iommu_unmap(domain->domain, dma->iova, dma->size); + continue; + } + + iova = dma->iova; + while (iova < dma->iova + dma->size) { + phys_addr_t phys, p; + size_t size; + dma_addr_t i; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (!phys) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(domain->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + + iommu_unmap(domain->domain, iova, size); + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, true); + } + } + + return ret; +} + +/* + * We change our unmap behavior slightly depending on whether the IOMMU + * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage + * for practically any contiguous power-of-two mapping we give it. This means + * we don't need to look for contiguous chunks ourselves to make unmapping + * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d + * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks + * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when + * hugetlbfs is in use. 
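+ * The probe below maps two contiguous pages as one block and then asks
+ * the IOMMU to unmap only PAGE_SIZE of it: a driver that used a
+ * superpage unmaps the whole block, in which case fgsp is set and
+ * vfio_unmap_unpin() skips its own contiguous-chunk search.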
+ */ +static void vfio_test_domain_fgsp(struct vfio_domain *domain) +{ + struct page *pages; + int ret, order = get_order(PAGE_SIZE * 2); + + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!pages) + return; + + ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, + IOMMU_READ | IOMMU_WRITE | domain->prot); + if (!ret) { + size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); + + if (unmapped == PAGE_SIZE) + iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE); + else + domain->fgsp = true; + } + + __free_pages(pages, order); +} + +static struct vfio_group *find_iommu_group(struct vfio_domain *domain, + struct iommu_group *iommu_group) +{ + struct vfio_group *g; + + list_for_each_entry(g, &domain->group_list, next) { + if (g->iommu_group == iommu_group) + return g; + } + + return NULL; +} + +static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) +{ + struct list_head group_resv_regions; + struct iommu_resv_region *region, *next; + bool ret = false; + + INIT_LIST_HEAD(&group_resv_regions); + iommu_get_group_resv_regions(group, &group_resv_regions); + list_for_each_entry(region, &group_resv_regions, list) { + /* + * The presence of any 'real' MSI regions should take + * precedence over the software-managed one if the + * IOMMU driver happens to advertise both types. + */ + if (region->type == IOMMU_RESV_MSI) { + ret = false; + break; + } + + if (region->type == IOMMU_RESV_SW_MSI) { + *base = region->start; + ret = true; + } + } + list_for_each_entry_safe(region, next, &group_resv_regions, list) + kfree(region); + return ret; +} + +static int vfio_iommu_type1_attach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ + struct vfio_iommu *iommu = iommu_data; + struct vfio_group *group; + struct vfio_domain *domain, *d; + struct bus_type *bus = NULL, *mdev_bus; + int ret; + bool resv_msi, msi_remap; + phys_addr_t resv_msi_base; + + mutex_lock(&iommu->lock); + + list_for_each_entry(d, &iommu->domain_list, next) { + if (find_iommu_group(d, iommu_group)) { + mutex_unlock(&iommu->lock); + return -EINVAL; + } + } + + if (iommu->external_domain) { + if (find_iommu_group(iommu->external_domain, iommu_group)) { + mutex_unlock(&iommu->lock); + return -EINVAL; + } + } + + group = kzalloc(sizeof(*group), GFP_KERNEL); + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!group || !domain) { + ret = -ENOMEM; + goto out_free; + } + + group->iommu_group = iommu_group; + + /* Determine bus_type in order to allocate a domain */ + ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type); + if (ret) + goto out_free; + + mdev_bus = symbol_get(mdev_bus_type); + + if (mdev_bus) { + if ((bus == mdev_bus) && !iommu_present(bus)) { + symbol_put(mdev_bus_type); + if (!iommu->external_domain) { + INIT_LIST_HEAD(&domain->group_list); + iommu->external_domain = domain; + } else + kfree(domain); + + list_add(&group->next, + &iommu->external_domain->group_list); + mutex_unlock(&iommu->lock); + return 0; + } + symbol_put(mdev_bus_type); + } + + domain->domain = iommu_domain_alloc(bus); + if (!domain->domain) { + ret = -EIO; + goto out_free; + } + + if (iommu->nesting) { + int attr = 1; + + ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING, + &attr); + if (ret) + goto out_domain; + } + + ret = iommu_attach_group(domain->domain, iommu_group); + if (ret) + goto out_domain; + + resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base); + + INIT_LIST_HEAD(&domain->group_list); + list_add(&group->next, &domain->group_list); + + msi_remap 
= irq_domain_check_msi_remap() || + iommu_capable(bus, IOMMU_CAP_INTR_REMAP); + + if (!allow_unsafe_interrupts && !msi_remap) { + pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", + __func__); + ret = -EPERM; + goto out_detach; + } + + if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY)) + domain->prot |= IOMMU_CACHE; + + /* + * Try to match an existing compatible domain. We don't want to + * preclude an IOMMU driver supporting multiple bus_types and being + * able to include different bus_types in the same IOMMU domain, so + * we test whether the domains use the same iommu_ops rather than + * testing if they're on the same bus_type. + */ + list_for_each_entry(d, &iommu->domain_list, next) { + if (d->domain->ops == domain->domain->ops && + d->prot == domain->prot) { + iommu_detach_group(domain->domain, iommu_group); + if (!iommu_attach_group(d->domain, iommu_group)) { + list_add(&group->next, &d->group_list); + iommu_domain_free(domain->domain); + kfree(domain); + mutex_unlock(&iommu->lock); + return 0; + } + + ret = iommu_attach_group(domain->domain, iommu_group); + if (ret) + goto out_domain; + } + } + + vfio_test_domain_fgsp(domain); + + /* replay mappings on new domains */ + ret = vfio_iommu_replay(iommu, domain); + if (ret) + goto out_detach; + + if (resv_msi) { + ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); + if (ret) + goto out_detach; + } + + list_add(&domain->next, &iommu->domain_list); + + mutex_unlock(&iommu->lock); + + return 0; + +out_detach: + iommu_detach_group(domain->domain, iommu_group); +out_domain: + iommu_domain_free(domain->domain); +out_free: + kfree(domain); + kfree(group); + mutex_unlock(&iommu->lock); + return ret; +} + +static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu) +{ + struct rb_node *node; + + while ((node = rb_first(&iommu->dma_list))) + vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node)); +} + +static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) +{ + struct rb_node *n, *p; + + n = rb_first(&iommu->dma_list); + for (; n; n = rb_next(n)) { + struct vfio_dma *dma; + long locked = 0, unlocked = 0; + + dma = rb_entry(n, struct vfio_dma, node); + unlocked += vfio_unmap_unpin(iommu, dma, false); + p = rb_first(&dma->pfn_list); + for (; p; p = rb_next(p)) { + struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, + node); + + if (!is_invalid_reserved_pfn(vpfn->pfn)) + locked++; + } + vfio_lock_acct(dma, locked - unlocked, true); + } +} + +static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu) +{ + struct rb_node *n; + + n = rb_first(&iommu->dma_list); + for (; n; n = rb_next(n)) { + struct vfio_dma *dma; + + dma = rb_entry(n, struct vfio_dma, node); + + if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list))) + break; + } + /* mdev vendor driver must unregister notifier */ + WARN_ON(iommu->notifier.head); +} + +static void vfio_iommu_type1_detach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ + struct vfio_iommu *iommu = iommu_data; + struct vfio_domain *domain; + struct vfio_group *group; + + mutex_lock(&iommu->lock); + + if (iommu->external_domain) { + group = find_iommu_group(iommu->external_domain, iommu_group); + if (group) { + list_del(&group->next); + kfree(group); + + if (list_empty(&iommu->external_domain->group_list)) { + vfio_sanity_check_pfn_list(iommu); + + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) + vfio_iommu_unmap_unpin_all(iommu); + + kfree(iommu->external_domain); + 
iommu->external_domain = NULL; + } + goto detach_group_done; + } + } + + list_for_each_entry(domain, &iommu->domain_list, next) { + group = find_iommu_group(domain, iommu_group); + if (!group) + continue; + + iommu_detach_group(domain->domain, iommu_group); + list_del(&group->next); + kfree(group); + /* + * Group ownership provides privilege, if the group list is + * empty, the domain goes away. If it's the last domain with + * iommu and external domain doesn't exist, then all the + * mappings go away too. If it's the last domain with iommu and + * external domain exist, update accounting + */ + if (list_empty(&domain->group_list)) { + if (list_is_singular(&iommu->domain_list)) { + if (!iommu->external_domain) + vfio_iommu_unmap_unpin_all(iommu); + else + vfio_iommu_unmap_unpin_reaccount(iommu); + } + iommu_domain_free(domain->domain); + list_del(&domain->next); + kfree(domain); + } + break; + } + +detach_group_done: + mutex_unlock(&iommu->lock); +} + +static void *vfio_iommu_type1_open(unsigned long arg) +{ + struct vfio_iommu *iommu; + + iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return ERR_PTR(-ENOMEM); + + switch (arg) { + case VFIO_TYPE1_IOMMU: + break; + case VFIO_TYPE1_NESTING_IOMMU: + iommu->nesting = true; + /* fall through */ + case VFIO_TYPE1v2_IOMMU: + iommu->v2 = true; + break; + default: + kfree(iommu); + return ERR_PTR(-EINVAL); + } + + INIT_LIST_HEAD(&iommu->domain_list); + iommu->dma_list = RB_ROOT; + iommu->dma_avail = dma_entry_limit; + mutex_init(&iommu->lock); + BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); + + return iommu; +} + +static void vfio_release_domain(struct vfio_domain *domain, bool external) +{ + struct vfio_group *group, *group_tmp; + + list_for_each_entry_safe(group, group_tmp, + &domain->group_list, next) { + if (!external) + iommu_detach_group(domain->domain, group->iommu_group); + list_del(&group->next); + kfree(group); + } + + if (!external) + iommu_domain_free(domain->domain); +} + +static void vfio_iommu_type1_release(void *iommu_data) +{ + struct vfio_iommu *iommu = iommu_data; + struct vfio_domain *domain, *domain_tmp; + + if (iommu->external_domain) { + vfio_release_domain(iommu->external_domain, true); + vfio_sanity_check_pfn_list(iommu); + kfree(iommu->external_domain); + } + + vfio_iommu_unmap_unpin_all(iommu); + + list_for_each_entry_safe(domain, domain_tmp, + &iommu->domain_list, next) { + vfio_release_domain(domain, false); + list_del(&domain->next); + kfree(domain); + } + kfree(iommu); +} + +static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) +{ + struct vfio_domain *domain; + int ret = 1; + + mutex_lock(&iommu->lock); + list_for_each_entry(domain, &iommu->domain_list, next) { + if (!(domain->prot & IOMMU_CACHE)) { + ret = 0; + break; + } + } + mutex_unlock(&iommu->lock); + + return ret; +} + +static long vfio_iommu_type1_ioctl(void *iommu_data, + unsigned int cmd, unsigned long arg) +{ + struct vfio_iommu *iommu = iommu_data; + unsigned long minsz; + + if (cmd == VFIO_CHECK_EXTENSION) { + switch (arg) { + case VFIO_TYPE1_IOMMU: + case VFIO_TYPE1v2_IOMMU: + case VFIO_TYPE1_NESTING_IOMMU: + return 1; + case VFIO_DMA_CC_IOMMU: + if (!iommu) + return 0; + return vfio_domains_have_iommu_cache(iommu); + default: + return 0; + } + } else if (cmd == VFIO_IOMMU_GET_INFO) { + struct vfio_iommu_type1_info info; + + minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + info.flags = 
VFIO_IOMMU_INFO_PGSIZES; + + info.iova_pgsizes = vfio_pgsize_bitmap(iommu); + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + + } else if (cmd == VFIO_IOMMU_MAP_DMA) { + struct vfio_iommu_type1_dma_map map; + uint32_t mask = VFIO_DMA_MAP_FLAG_READ | + VFIO_DMA_MAP_FLAG_WRITE; + + minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); + + if (copy_from_user(&map, (void __user *)arg, minsz)) + return -EFAULT; + + if (map.argsz < minsz || map.flags & ~mask) + return -EINVAL; + + return vfio_dma_do_map(iommu, &map); + + } else if (cmd == VFIO_IOMMU_UNMAP_DMA) { + struct vfio_iommu_type1_dma_unmap unmap; + long ret; + + minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); + + if (copy_from_user(&unmap, (void __user *)arg, minsz)) + return -EFAULT; + + if (unmap.argsz < minsz || unmap.flags) + return -EINVAL; + + ret = vfio_dma_do_unmap(iommu, &unmap); + if (ret) + return ret; + + return copy_to_user((void __user *)arg, &unmap, minsz) ? + -EFAULT : 0; + } + + return -ENOTTY; +} + +static int vfio_iommu_type1_register_notifier(void *iommu_data, + unsigned long *events, + struct notifier_block *nb) +{ + struct vfio_iommu *iommu = iommu_data; + + /* clear known events */ + *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP; + + /* refuse to register if still events remaining */ + if (*events) + return -EINVAL; + + return blocking_notifier_chain_register(&iommu->notifier, nb); +} + +static int vfio_iommu_type1_unregister_notifier(void *iommu_data, + struct notifier_block *nb) +{ + struct vfio_iommu *iommu = iommu_data; + + return blocking_notifier_chain_unregister(&iommu->notifier, nb); +} + +static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { + .name = "vfio-iommu-type1", + .owner = THIS_MODULE, + .open = vfio_iommu_type1_open, + .release = vfio_iommu_type1_release, + .ioctl = vfio_iommu_type1_ioctl, + .attach_group = vfio_iommu_type1_attach_group, + .detach_group = vfio_iommu_type1_detach_group, + .pin_pages = vfio_iommu_type1_pin_pages, + .unpin_pages = vfio_iommu_type1_unpin_pages, + .register_notifier = vfio_iommu_type1_register_notifier, + .unregister_notifier = vfio_iommu_type1_unregister_notifier, +}; + +static int __init vfio_iommu_type1_init(void) +{ + return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1); +} + +static void __exit vfio_iommu_type1_cleanup(void) +{ + vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1); +} + +module_init(vfio_iommu_type1_init); +module_exit(vfio_iommu_type1_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/vfio_spapr_eeh.c b/drivers/vfio/vfio_spapr_eeh.c new file mode 100644 index 000000000..38edeb472 --- /dev/null +++ b/drivers/vfio/vfio_spapr_eeh.c @@ -0,0 +1,110 @@ +/* + * EEH functionality support for VFIO devices. The feature is only + * available on sPAPR compatible platforms. + * + * Copyright Gavin Shan, IBM Corporation 2014. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <asm/eeh.h> + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation" +#define DRIVER_DESC "VFIO IOMMU SPAPR EEH" + +/* We might build address mapping here for "fast" path later */ +void vfio_spapr_pci_eeh_open(struct pci_dev *pdev) +{ + eeh_dev_open(pdev); +} +EXPORT_SYMBOL_GPL(vfio_spapr_pci_eeh_open); + +void vfio_spapr_pci_eeh_release(struct pci_dev *pdev) +{ + eeh_dev_release(pdev); +} +EXPORT_SYMBOL_GPL(vfio_spapr_pci_eeh_release); + +long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, + unsigned int cmd, unsigned long arg) +{ + struct eeh_pe *pe; + struct vfio_eeh_pe_op op; + unsigned long minsz; + long ret = -EINVAL; + + switch (cmd) { + case VFIO_CHECK_EXTENSION: + if (arg == VFIO_EEH) + ret = eeh_enabled() ? 1 : 0; + else + ret = 0; + break; + case VFIO_EEH_PE_OP: + pe = eeh_iommu_group_to_pe(group); + if (!pe) + return -ENODEV; + + minsz = offsetofend(struct vfio_eeh_pe_op, op); + if (copy_from_user(&op, (void __user *)arg, minsz)) + return -EFAULT; + if (op.argsz < minsz || op.flags) + return -EINVAL; + + switch (op.op) { + case VFIO_EEH_PE_DISABLE: + ret = eeh_pe_set_option(pe, EEH_OPT_DISABLE); + break; + case VFIO_EEH_PE_ENABLE: + ret = eeh_pe_set_option(pe, EEH_OPT_ENABLE); + break; + case VFIO_EEH_PE_UNFREEZE_IO: + ret = eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO); + break; + case VFIO_EEH_PE_UNFREEZE_DMA: + ret = eeh_pe_set_option(pe, EEH_OPT_THAW_DMA); + break; + case VFIO_EEH_PE_GET_STATE: + ret = eeh_pe_get_state(pe); + break; + case VFIO_EEH_PE_RESET_DEACTIVATE: + ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE); + break; + case VFIO_EEH_PE_RESET_HOT: + ret = eeh_pe_reset(pe, EEH_RESET_HOT); + break; + case VFIO_EEH_PE_RESET_FUNDAMENTAL: + ret = eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL); + break; + case VFIO_EEH_PE_CONFIGURE: + ret = eeh_pe_configure(pe); + break; + case VFIO_EEH_PE_INJECT_ERR: + minsz = offsetofend(struct vfio_eeh_pe_op, err.mask); + if (op.argsz < minsz) + return -EINVAL; + if (copy_from_user(&op, (void __user *)arg, minsz)) + return -EFAULT; + + ret = eeh_pe_inject_err(pe, op.err.type, op.err.func, + op.err.addr, op.err.mask); + break; + default: + ret = -EINVAL; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(vfio_spapr_iommu_eeh_ioctl); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c new file mode 100644 index 000000000..2a1be859e --- /dev/null +++ b/drivers/vfio/virqfd.c @@ -0,0 +1,226 @@ +/* + * VFIO generic eventfd code for IRQFD support. + * Derived from drivers/vfio/pci/vfio_pci_intrs.c + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/vfio.h> +#include <linux/eventfd.h> +#include <linux/file.h> +#include <linux/module.h> +#include <linux/slab.h> + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" +#define DRIVER_DESC "IRQFD support for VFIO bus drivers" + +static struct workqueue_struct *vfio_irqfd_cleanup_wq; +static DEFINE_SPINLOCK(virqfd_lock); + +static int __init vfio_virqfd_init(void) +{ + vfio_irqfd_cleanup_wq = + create_singlethread_workqueue("vfio-irqfd-cleanup"); + if (!vfio_irqfd_cleanup_wq) + return -ENOMEM; + + return 0; +} + +static void __exit vfio_virqfd_exit(void) +{ + destroy_workqueue(vfio_irqfd_cleanup_wq); +} + +static void virqfd_deactivate(struct virqfd *virqfd) +{ + queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown); +} + +static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) +{ + struct virqfd *virqfd = container_of(wait, struct virqfd, wait); + __poll_t flags = key_to_poll(key); + + if (flags & EPOLLIN) { + /* An event has been signaled, call function */ + if ((!virqfd->handler || + virqfd->handler(virqfd->opaque, virqfd->data)) && + virqfd->thread) + schedule_work(&virqfd->inject); + } + + if (flags & EPOLLHUP) { + unsigned long flags; + spin_lock_irqsave(&virqfd_lock, flags); + + /* + * The eventfd is closing, if the virqfd has not yet been + * queued for release, as determined by testing whether the + * virqfd pointer to it is still valid, queue it now. As + * with kvm irqfds, we know we won't race against the virqfd + * going away because we hold the lock to get here. + */ + if (*(virqfd->pvirqfd) == virqfd) { + *(virqfd->pvirqfd) = NULL; + virqfd_deactivate(virqfd); + } + + spin_unlock_irqrestore(&virqfd_lock, flags); + } + + return 0; +} + +static void virqfd_ptable_queue_proc(struct file *file, + wait_queue_head_t *wqh, poll_table *pt) +{ + struct virqfd *virqfd = container_of(pt, struct virqfd, pt); + add_wait_queue(wqh, &virqfd->wait); +} + +static void virqfd_shutdown(struct work_struct *work) +{ + struct virqfd *virqfd = container_of(work, struct virqfd, shutdown); + u64 cnt; + + eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt); + flush_work(&virqfd->inject); + eventfd_ctx_put(virqfd->eventfd); + + kfree(virqfd); +} + +static void virqfd_inject(struct work_struct *work) +{ + struct virqfd *virqfd = container_of(work, struct virqfd, inject); + if (virqfd->thread) + virqfd->thread(virqfd->opaque, virqfd->data); +} + +int vfio_virqfd_enable(void *opaque, + int (*handler)(void *, void *), + void (*thread)(void *, void *), + void *data, struct virqfd **pvirqfd, int fd) +{ + struct fd irqfd; + struct eventfd_ctx *ctx; + struct virqfd *virqfd; + int ret = 0; + __poll_t events; + + virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL); + if (!virqfd) + return -ENOMEM; + + virqfd->pvirqfd = pvirqfd; + virqfd->opaque = opaque; + virqfd->handler = handler; + virqfd->thread = thread; + virqfd->data = data; + + INIT_WORK(&virqfd->shutdown, virqfd_shutdown); + INIT_WORK(&virqfd->inject, virqfd_inject); + + irqfd = fdget(fd); + if (!irqfd.file) { + ret = -EBADF; + goto err_fd; + } + + ctx = eventfd_ctx_fileget(irqfd.file); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto err_ctx; + } + + virqfd->eventfd = ctx; + + /* + * virqfds can be released by closing the eventfd or directly + * through ioctl. These are both done through a workqueue, so + * we update the pointer to the virqfd under lock to avoid + * pushing multiple jobs to release the same virqfd. 
+ */ + spin_lock_irq(&virqfd_lock); + + if (*pvirqfd) { + spin_unlock_irq(&virqfd_lock); + ret = -EBUSY; + goto err_busy; + } + *pvirqfd = virqfd; + + spin_unlock_irq(&virqfd_lock); + + /* + * Install our own custom wake-up handling so we are notified via + * a callback whenever someone signals the underlying eventfd. + */ + init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup); + init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc); + + events = vfs_poll(irqfd.file, &virqfd->pt); + + /* + * Check if there was an event already pending on the eventfd + * before we registered and trigger it as if we didn't miss it. + */ + if (events & EPOLLIN) { + if ((!handler || handler(opaque, data)) && thread) + schedule_work(&virqfd->inject); + } + + /* + * Do not drop the file until the irqfd is fully initialized, + * otherwise we might race against the EPOLLHUP. + */ + fdput(irqfd); + + return 0; +err_busy: + eventfd_ctx_put(ctx); +err_ctx: + fdput(irqfd); +err_fd: + kfree(virqfd); + + return ret; +} +EXPORT_SYMBOL_GPL(vfio_virqfd_enable); + +void vfio_virqfd_disable(struct virqfd **pvirqfd) +{ + unsigned long flags; + + spin_lock_irqsave(&virqfd_lock, flags); + + if (*pvirqfd) { + virqfd_deactivate(*pvirqfd); + *pvirqfd = NULL; + } + + spin_unlock_irqrestore(&virqfd_lock, flags); + + /* + * Block until we know all outstanding shutdown jobs have completed. + * Even if we don't queue the job, flush the wq to be sure it's + * been released. + */ + flush_workqueue(vfio_irqfd_cleanup_wq); +} +EXPORT_SYMBOL_GPL(vfio_virqfd_disable); + +module_init(vfio_virqfd_init); +module_exit(vfio_virqfd_exit); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); |
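The type1 ioctl handler above is easiest to read alongside the userspace sequence that drives it. What follows is a minimal sketch (not taken from the kernel tree) of creating a container, attaching a group, selecting the v2 type1 backend and mapping a buffer for DMA; the group number 26, the IOVA of 0 and the 1 MiB buffer size are assumptions for illustration only, and error handling is reduced to early returns.

/*
 * Minimal userspace sketch of the type1 flow handled by
 * vfio_iommu_type1_ioctl(): create a container, attach a group,
 * select the v2 backend, query page sizes and map a buffer for DMA.
 * The group number (26), the IOVA and the buffer size are assumptions
 * made for this example only.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

int main(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/26", O_RDWR);	/* assumed group */
	struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
	void *buf;

	if (container < 0 || group < 0)
		return 1;
	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU) != 1)
		return 1;

	/* VFIO_GROUP_SET_CONTAINER leads to vfio_iommu_type1_attach_group() */
	if (ioctl(group, VFIO_GROUP_SET_CONTAINER, &container))
		return 1;
	if (ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU))
		return 1;

	/* info.iova_pgsizes is filled from vfio_pgsize_bitmap() */
	if (ioctl(container, VFIO_IOMMU_GET_INFO, &info))
		return 1;

	buf = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	map.vaddr = (unsigned long)buf;
	map.iova  = 0;			/* assumed IOVA */
	map.size  = 1024 * 1024;
	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

	/* dispatched to vfio_dma_do_map() by the handler above */
	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map) ? 1 : 0;
}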
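The sPAPR EEH path in vfio_spapr_eeh.c is exercised through the same container file descriptor once the SPAPR TCE backend is in use. A hedged sketch, assuming 'container' is already bound to a group on an EEH-capable platform, might query the PE state and thaw MMIO and DMA like this; whether thawing alone is the right recovery step depends on the error, so treat the sequence as illustrative rather than a recovery recipe.

/*
 * Sketch of exercising vfio_spapr_iommu_eeh_ioctl() from userspace.
 * 'container' is assumed to be a container fd already bound to the
 * SPAPR TCE backend with the device's group attached.
 */
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int eeh_pe_thaw(int container)
{
	struct vfio_eeh_pe_op op = {
		.argsz = sizeof(op),
		.flags = 0,
	};

	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_EEH) != 1)
		return -1;	/* EEH not enabled on this platform */

	op.op = VFIO_EEH_PE_GET_STATE;	/* maps to eeh_pe_get_state() */
	if (ioctl(container, VFIO_EEH_PE_OP, &op) < 0)
		return -1;

	op.op = VFIO_EEH_PE_UNFREEZE_IO;	/* EEH_OPT_THAW_MMIO */
	if (ioctl(container, VFIO_EEH_PE_OP, &op) < 0)
		return -1;

	op.op = VFIO_EEH_PE_UNFREEZE_DMA;	/* EEH_OPT_THAW_DMA */
	return ioctl(container, VFIO_EEH_PE_OP, &op);
}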
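On the kernel side, the virqfd helpers are consumed by VFIO bus drivers: vfio_virqfd_enable() takes an eventfd from userspace, an optional handler that runs from the eventfd wakeup (a non-zero return schedules the thread callback), and a thread callback that runs from a workqueue. The sketch below is modeled loosely on how vfio-pci wires its INTx unmask eventfd; my_device, my_quick_check() and my_deferred_work() are illustrative names, not part of the VFIO API.

/*
 * Sketch of how a VFIO bus driver would use the virqfd helpers above.
 * my_device, my_quick_check() and my_deferred_work() are illustrative
 * names only.
 */
#include <linux/vfio.h>

struct my_device {
	struct virqfd *unmask_virqfd;
};

/*
 * Runs from the eventfd wakeup and must not sleep; a non-zero return
 * asks the core to schedule the thread callback.
 */
static int my_quick_check(void *opaque, void *data)
{
	return 1;
}

/* Runs from the workqueue and may sleep. */
static void my_deferred_work(void *opaque, void *data)
{
	/* e.g. unmask the interrupt or signal another eventfd */
}

static int my_enable_unmask_fd(struct my_device *mydev, int eventfd)
{
	return vfio_virqfd_enable(mydev, my_quick_check, my_deferred_work,
				  NULL, &mydev->unmask_virqfd, eventfd);
}

static void my_disable_unmask_fd(struct my_device *mydev)
{
	vfio_virqfd_disable(&mydev->unmask_virqfd);
}

Because vfio_virqfd_disable() flushes the cleanup workqueue, once it returns no handler or thread callback should still be running against the opaque pointer, so the driver can safely free the structure that held the struct virqfd pointer afterwards.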