author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/iommu/iommu.c
parent     Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--  drivers/iommu/iommu.c  200
1 file changed, 144 insertions(+), 56 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33e2a9b5d3..ad33161f23 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -148,7 +148,7 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
-static struct bus_type * const iommu_buses[] = {
+static const struct bus_type * const iommu_buses[] = {
&platform_bus_type,
#ifdef CONFIG_PCI
&pci_bus_type,
@@ -257,13 +257,6 @@ int iommu_device_register(struct iommu_device *iommu,
/* We need to be able to take module references appropriately */
if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
return -EINVAL;
- /*
- * Temporarily enforce global restriction to a single driver. This was
- * already the de-facto behaviour, since any possible combination of
- * existing drivers would compete for at least the PCI or platform bus.
- */
- if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
- return -EBUSY;
iommu->ops = ops;
if (hwdev)
@@ -273,10 +266,8 @@ int iommu_device_register(struct iommu_device *iommu,
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
- for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
- iommu_buses[i]->iommu_ops = ops;
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
err = bus_iommu_probe(iommu_buses[i]);
- }
if (err)
iommu_device_unregister(iommu);
return err;
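For context, a minimal sketch of the driver-side flow this hunk simplifies; my_iommu, my_iommu_ops and the embedded iommu member are hypothetical names, not from this patch:

static int my_iommu_probe(struct platform_device *pdev)
{
	/* hypothetical driver state embedding a struct iommu_device */
	struct my_iommu *mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);

	if (!mmu)
		return -ENOMEM;
	/*
	 * Registering the instance is now sufficient: there is no
	 * bus->iommu_ops assignment and no single-driver -EBUSY check,
	 * and bus_iommu_probe() still runs for every entry in iommu_buses.
	 */
	return iommu_device_register(&mmu->iommu, &my_iommu_ops, &pdev->dev);
}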
@@ -329,7 +320,6 @@ int iommu_device_register_bus(struct iommu_device *iommu,
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
- bus->iommu_ops = ops;
err = bus_iommu_probe(bus);
if (err) {
iommu_device_unregister_bus(iommu, bus, nb);
@@ -344,6 +334,8 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
+ lockdep_assert_held(&iommu_probe_device_lock);
+
if (param)
return param;
@@ -368,6 +360,15 @@ static void dev_iommu_free(struct device *dev)
kfree(param);
}
+/*
+ * Internal equivalent of device_iommu_mapped() for when we care that a device
+ * actually has API ops, and don't want false positives from VFIO-only groups.
+ */
+static bool dev_has_iommu(struct device *dev)
+{
+ return dev->iommu && dev->iommu->iommu_dev;
+}
+
static u32 dev_iommu_get_max_pasids(struct device *dev)
{
u32 max_pasids = 0, bits = 0;
@@ -386,6 +387,15 @@ static u32 dev_iommu_get_max_pasids(struct device *dev)
return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
+void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+ /* FSL_PAMU does something weird */
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ lockdep_assert_held(&iommu_probe_device_lock);
+ dev->iommu->priv = priv;
+}
+EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
+
/*
* Init the dev->iommu and dev->iommu_group in the struct device and get the
* driver probed
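The dev_iommu_priv_set() hunk above relies on callers holding iommu_probe_device_lock, which the core takes around the driver's probe_device hook. A sketch of the usual call site, assuming a hypothetical my_master type and my_iommu_instance global:

static struct iommu_device *my_probe_device(struct device *dev)
{
	struct my_master *master = kzalloc(sizeof(*master), GFP_KERNEL);

	if (!master)
		return ERR_PTR(-ENOMEM);
	/* the core holds iommu_probe_device_lock here, satisfying the assert */
	dev_iommu_priv_set(dev, master);
	return &my_iommu_instance.iommu;
}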
@@ -453,13 +463,24 @@ static void iommu_deinit_device(struct device *dev)
/*
* release_device() must stop using any attached domain on the device.
- * If there are still other devices in the group they are not effected
+ * If there are still other devices in the group, they are not affected
* by this callback.
*
- * The IOMMU driver must set the device to either an identity or
- * blocking translation and stop using any domain pointer, as it is
- * going to be freed.
+ * If the iommu driver provides release_domain, the core code ensures
+ * that domain is attached prior to calling release_device. Drivers can
+ * use this to enforce a translation on the idle iommu. Typically, the
+ * global static blocked_domain is a good choice.
+ *
+ * Otherwise, the iommu driver must set the device to either an identity
+ * or a blocking translation in release_device() and stop using any
+ * domain pointer, as it is going to be freed.
+ *
+ * Regardless, if a delayed attach never occurred, then the release
+ * should still avoid touching any hardware configuration either.
*/
+ if (!dev->iommu->attach_deferred && ops->release_domain)
+ ops->release_domain->ops->attach_dev(ops->release_domain, dev);
+
if (ops->release_device)
ops->release_device(dev);
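To make the release_domain contract above concrete, a driver could wire it up roughly as below; the my_* names are hypothetical, while IOMMU_DOMAIN_BLOCKED and the ops layout follow the usual in-tree pattern:

/* a statically allocated blocked domain the core attaches on release */
static int my_attach_blocked(struct iommu_domain *domain, struct device *dev)
{
	my_hw_block_translation(dev);	/* hypothetical helper quiescing DMA */
	return 0;
}

static const struct iommu_domain_ops my_blocked_domain_ops = {
	.attach_dev = my_attach_blocked,
};

static struct iommu_domain my_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops  = &my_blocked_domain_ops,
};

static const struct iommu_ops my_iommu_ops = {
	/* ... */
	.release_domain = &my_blocked_domain,
};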
@@ -489,11 +510,26 @@ DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
+ struct iommu_fwspec *fwspec;
struct iommu_group *group;
struct group_device *gdev;
int ret;
+ /*
+ * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
+ * instances with non-NULL fwnodes, and client devices should have been
+ * identified with a fwspec by this point. Otherwise, we can currently
+ * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
+ * be present, and that any of their registered instances has suitable
+ * ops for probing, and thus cheekily co-opt the same mechanism.
+ */
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && fwspec->ops)
+ ops = fwspec->ops;
+ else
+ ops = iommu_ops_from_fwnode(NULL);
+
if (!ops)
return -ENODEV;
/*
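The fwspec consulted above is populated by the firmware layers well before __iommu_probe_device() runs; roughly, a sketch of that pre-existing flow, where iommu_fwnode and sid stand in for values parsed from DT or ACPI tables:

/* in of_iommu/IORT-style configuration code, not in this patch */
err = iommu_fwspec_init(dev, iommu_fwnode, ops);
if (!err)
	err = iommu_fwspec_add_ids(dev, &sid, 1);	/* record the device's stream ID */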
@@ -618,7 +654,7 @@ static void __iommu_group_remove_device(struct device *dev)
list_del(&device->list);
__iommu_group_free_device(group, device);
- if (dev->iommu && dev->iommu->iommu_dev)
+ if (dev_has_iommu(dev))
iommu_deinit_device(dev);
else
dev->iommu_group = NULL;
@@ -817,7 +853,7 @@ int iommu_get_group_resv_regions(struct iommu_group *group,
* Non-API groups still expose reserved_regions in sysfs,
* so filter out calls that get here that way.
*/
- if (!device->dev->iommu)
+ if (!dev_has_iommu(device->dev))
break;
INIT_LIST_HEAD(&dev_resv_regions);
@@ -1223,6 +1259,12 @@ void iommu_group_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+static struct device *iommu_group_first_dev(struct iommu_group *group)
+{
+ lockdep_assert_held(&group->mutex);
+ return list_first_entry(&group->devices, struct group_device, list)->dev;
+}
+
/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
@@ -1751,30 +1793,13 @@ __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
}
/*
- * Returns the iommu_ops for the devices in an iommu group.
- *
- * It is assumed that all devices in an iommu group are managed by a single
- * IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device
- * in the group.
- */
-static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
-{
- struct group_device *device =
- list_first_entry(&group->devices, struct group_device, list);
-
- lockdep_assert_held(&group->mutex);
-
- return dev_iommu_ops(device->dev);
-}
-
-/*
* req_type of 0 means "auto" which means to select a domain based on
* iommu_def_domain_type or what the driver actually supports.
*/
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
- const struct iommu_ops *ops = group_iommu_ops(group);
+ const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group));
struct iommu_domain *dom;
lockdep_assert_held(&group->mutex);
@@ -1785,7 +1810,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
* domain. Do not use in new drivers.
*/
if (ops->default_domain) {
- if (req_type)
+ if (req_type != ops->default_domain->type)
return ERR_PTR(-EINVAL);
return ops->default_domain;
}
@@ -1854,13 +1879,21 @@ static int iommu_bus_notifier(struct notifier_block *nb,
static int iommu_get_def_domain_type(struct iommu_group *group,
struct device *dev, int cur_type)
{
- const struct iommu_ops *ops = group_iommu_ops(group);
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
int type;
- if (!ops->def_domain_type)
- return cur_type;
-
- type = ops->def_domain_type(dev);
+ if (ops->default_domain) {
+ /*
+ * Drivers that declare a global static default_domain will
+ * always choose that.
+ */
+ type = ops->default_domain->type;
+ } else {
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+ else
+ return cur_type;
+ }
if (!type || cur_type == type)
return cur_type;
if (!cur_type)
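For reference, a typical def_domain_type hook feeding this negotiation might look like the sketch below; the quirk test is hypothetical, and returning 0 defers the choice to the core:

static int my_def_domain_type(struct device *dev)
{
	/* force direct mapping for a hypothetical quirky device */
	if (dev_is_pci(dev) && my_device_is_quirky(to_pci_dev(dev)))
		return IOMMU_DOMAIN_IDENTITY;
	return 0;	/* no preference; core picks via iommu_def_domain_type */
}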
@@ -2003,9 +2036,28 @@ int bus_iommu_probe(const struct bus_type *bus)
return 0;
}
+/**
+ * iommu_present() - make platform-specific assumptions about an IOMMU
+ * @bus: bus to check
+ *
+ * Do not use this function. You want device_iommu_mapped() instead.
+ *
+ * Return: true if some IOMMU is present and aware of devices on the given bus;
+ * in general it may not be the only IOMMU, and it may not have anything to do
+ * with whatever device you are ultimately interested in.
+ */
bool iommu_present(const struct bus_type *bus)
{
- return bus->iommu_ops != NULL;
+ bool ret = false;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
+ if (iommu_buses[i] == bus) {
+ spin_lock(&iommu_device_lock);
+ ret = !list_empty(&iommu_device_list);
+ spin_unlock(&iommu_device_lock);
+ }
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_present);
@@ -2021,7 +2073,7 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
const struct iommu_ops *ops;
- if (!dev->iommu || !dev->iommu->iommu_dev)
+ if (!dev_has_iommu(dev))
return false;
ops = dev_iommu_ops(dev);
@@ -2107,6 +2159,7 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
return ERR_PTR(-ENOMEM);
domain->type = type;
+ domain->owner = ops;
/*
* If not already set, assume all sizes by default; the driver
* may override this later
@@ -2132,21 +2185,37 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
static struct iommu_domain *
__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
{
- struct device *dev =
- list_first_entry(&group->devices, struct group_device, list)
- ->dev;
+ struct device *dev = iommu_group_first_dev(group);
+
+ return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type);
+}
+
+static int __iommu_domain_alloc_dev(struct device *dev, void *data)
+{
+ const struct iommu_ops **ops = data;
- return __iommu_domain_alloc(group_iommu_ops(group), dev, type);
+ if (!dev_has_iommu(dev))
+ return 0;
+
+ if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev),
+ "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. You will still need to disable one or more for this to work, sorry!\n",
+ dev_bus_name(dev)))
+ return -EBUSY;
+
+ *ops = dev_iommu_ops(dev);
+ return 0;
}
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
+ const struct iommu_ops *ops = NULL;
+ int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev);
struct iommu_domain *domain;
- if (bus == NULL || bus->iommu_ops == NULL)
+ if (err || !ops)
return NULL;
- domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
- IOMMU_DOMAIN_UNMANAGED);
+
+ domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
return NULL;
return domain;
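A hedged sketch of a caller under the rewritten allocator: the NULL return now covers both an IOMMU-less bus and the multi-driver case warned about in __iommu_domain_alloc_dev():

struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
int ret;

if (!dom)
	return -ENODEV;
/* attach now also verifies dev_iommu_ops(dev) == domain->owner */
ret = iommu_attach_device(dom, dev);
if (ret)
	iommu_domain_free(dom);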
@@ -2284,10 +2353,16 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
+ struct device *dev;
+
if (group->domain && group->domain != group->default_domain &&
group->domain != group->blocking_domain)
return -EBUSY;
+ dev = iommu_group_first_dev(group);
+ if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ return -EINVAL;
+
return __iommu_group_set_domain(group, domain);
}
@@ -3004,8 +3079,8 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
*/
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ if (dev_has_iommu(dev)) {
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops->dev_enable_feat)
return ops->dev_enable_feat(dev, feat);
@@ -3020,8 +3095,8 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ if (dev_has_iommu(dev)) {
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops->dev_disable_feat)
return ops->dev_disable_feat(dev, feat);
@@ -3472,6 +3547,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
{
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
+ struct group_device *device;
void *curr;
int ret;
@@ -3481,7 +3557,18 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
if (!group)
return -ENODEV;
+ if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
+ pasid == IOMMU_NO_PASID)
+ return -EINVAL;
+
mutex_lock(&group->mutex);
+ for_each_group_device(group, device) {
+ if (pasid >= device->dev->iommu->max_pasids) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
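From a caller's perspective, the added validation folds into the existing error conventions; a brief sketch (sva_domain and pasid are hypothetical values):

ret = iommu_attach_device_pasid(sva_domain, dev, pasid);
if (ret)
	/*
	 * -EINVAL: foreign domain owner, IOMMU_NO_PASID, or a PASID
	 * beyond some group member's max_pasids; -EBUSY: slot taken.
	 */
	dev_err(dev, "PASID %u attach failed: %d\n", pasid, ret);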
@@ -3569,6 +3656,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
domain->type = IOMMU_DOMAIN_SVA;
mmgrab(mm);
domain->mm = mm;
+ domain->owner = ops;
domain->iopf_handler = iommu_sva_handle_iopf;
domain->fault_data = mm;
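Setting domain->owner here keeps SVA domains subject to the same ownership checks as the regular attach paths. A sketch of the usual consumer flow through the public API:

struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
u32 pasid;

if (IS_ERR(handle))
	return PTR_ERR(handle);
/* the PASID to program into the device's DMA requests */
pasid = iommu_sva_get_pasid(handle);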