Diffstat (limited to 'drivers/iommu/intel/iommu.c')
-rw-r--r--  drivers/iommu/intel/iommu.c | 447
1 file changed, 191 insertions(+), 256 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a8366b1f4f..d7e10f1311 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -46,9 +46,6 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
-#define MAX_AGAW_WIDTH 64
-#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
-
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
@@ -63,74 +60,6 @@
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
-}
-
-static inline int width_to_agaw(int width)
-{
- return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(u64 pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline u64 level_mask(int level)
-{
- return -1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 level_size(int level)
-{
- return 1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 align_to_level(u64 pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
-{
- return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
-}
-
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
- return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
- return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
- return page_to_dma_pfn(virt_to_page(p));
-}
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
@@ -168,76 +97,79 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-static inline void context_set_present(struct context_entry *context)
+static int device_rid_cmp_key(const void *key, const struct rb_node *node)
{
- context->lo |= 1;
-}
+ struct device_domain_info *info =
+ rb_entry(node, struct device_domain_info, node);
+ const u16 *rid_lhs = key;
-static inline void context_set_fault_enable(struct context_entry *context)
-{
- context->lo &= (((u64)-1) << 2) | 1;
-}
+ if (*rid_lhs < PCI_DEVID(info->bus, info->devfn))
+ return -1;
-static inline void context_set_translation_type(struct context_entry *context,
- unsigned long value)
-{
- context->lo &= (((u64)-1) << 4) | 3;
- context->lo |= (value & 3) << 2;
-}
+ if (*rid_lhs > PCI_DEVID(info->bus, info->devfn))
+ return 1;
-static inline void context_set_address_root(struct context_entry *context,
- unsigned long value)
-{
- context->lo &= ~VTD_PAGE_MASK;
- context->lo |= value & VTD_PAGE_MASK;
+ return 0;
}
-static inline void context_set_address_width(struct context_entry *context,
- unsigned long value)
+static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
{
- context->hi |= value & 7;
-}
+ struct device_domain_info *info =
+ rb_entry(lhs, struct device_domain_info, node);
+ u16 key = PCI_DEVID(info->bus, info->devfn);
-static inline void context_set_domain_id(struct context_entry *context,
- unsigned long value)
-{
- context->hi |= (value & ((1 << 16) - 1)) << 8;
+ return device_rid_cmp_key(&key, rhs);
}
-static inline void context_set_pasid(struct context_entry *context)
+/*
+ * Looks up an IOMMU-probed device using its source ID.
+ *
+ * Returns the pointer to the device if there is a match. Otherwise,
+ * returns NULL.
+ *
+ * Note that this helper doesn't guarantee that the device won't be
+ * released by the iommu subsystem after being returned. The caller
+ * should use its own synchronization mechanism to avoid the device
+ * being released during its use if it's possibly the case.
+ */
+struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
{
- context->lo |= CONTEXT_PASIDE;
-}
+ struct device_domain_info *info = NULL;
+ struct rb_node *node;
+ unsigned long flags;
-static inline int context_domain_id(struct context_entry *c)
-{
- return((c->hi >> 8) & 0xffff);
-}
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
+ if (node)
+ info = rb_entry(node, struct device_domain_info, node);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
-static inline void context_clear_entry(struct context_entry *context)
-{
- context->lo = 0;
- context->hi = 0;
+ return info ? info->dev : NULL;
}
-static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static int device_rbtree_insert(struct intel_iommu *iommu,
+ struct device_domain_info *info)
{
- if (!iommu->copied_tables)
- return false;
+ struct rb_node *curr;
+ unsigned long flags;
- return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+ if (WARN_ON(curr))
+ return -EEXIST;
-static inline void
-set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+ return 0;
}
-static inline void
-clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void device_rbtree_remove(struct device_domain_info *info)
{
- clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ rb_erase(&info->node, &iommu->device_rbtree);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
}
/*
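
[Editor's note] For context on the new lookup path: the tree is keyed by the PCI source ID (RID), i.e. the bus number in the high byte and devfn in the low byte. Below is a minimal standalone C sketch (illustration only, not part of the patch) of the same three-way comparison that device_rid_cmp_key() feeds to rb_find()/rb_find_add(). pci_devid() here is a hypothetical stand-in for the kernel's PCI_DEVID() macro.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's PCI_DEVID(): bus number in the high byte,
 * devfn (device/function) in the low byte of the 16-bit source ID. */
static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
{
	return (uint16_t)(bus << 8) | devfn;
}

/* Three-way compare of a lookup key against a node's RID, mirroring
 * device_rid_cmp_key(): <0 walks left, >0 walks right, 0 is a match. */
static int rid_cmp_key(uint16_t key, uint8_t bus, uint8_t devfn)
{
	uint16_t rid = pci_devid(bus, devfn);

	if (key < rid)
		return -1;
	if (key > rid)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", rid_cmp_key(0x0010, 0x00, 0x10));	/* 0: matches device 00:02.0 */
	printf("%d\n", rid_cmp_key(0x0008, 0x00, 0x10));	/* -1: key sorts before it */
	return 0;
}

The tree itself is taken and released with device_rbtree_lock and interrupts disabled, which suggests lookups are intended to be safe from atomic context such as fault reporting.
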
@@ -383,13 +315,12 @@ void free_pgtable_page(void *vaddr)
free_page((unsigned long)vaddr);
}
-static inline int domain_type_is_si(struct dmar_domain *domain)
+static int domain_type_is_si(struct dmar_domain *domain)
{
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
}
-static inline int domain_pfn_supported(struct dmar_domain *domain,
- unsigned long pfn)
+static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
@@ -451,7 +382,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
-static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
@@ -701,7 +632,7 @@ static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
return false;
}
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct pci_dev *pdev = NULL;
@@ -1510,6 +1441,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_unlock_irqrestore(&domain->lock, flags);
}
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ unsigned long pfn, unsigned int pages,
+ int ih)
+{
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned long bitmask = aligned_pages - 1;
+ unsigned int mask = ilog2(aligned_pages);
+ u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
+ /*
+ * Fallback to domain selective flush if no PSI support or
+ * the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+}
+
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
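
[Editor's note] The mask arithmetic that moves into __iommu_flush_iotlb_psi() is self-contained and easy to sanity-check outside the kernel. The following standalone C sketch (illustration only) reproduces the same computation, using compiler builtins in place of the kernel's __roundup_pow_of_two(), ilog2() and __ffs().

#include <stdio.h>

#define BITS_PER_LONG 64

/* Compute the PSI address mask the same way the patch does: round the
 * page count up to a power of two, and if the base pfn is not aligned
 * to that size, widen the mask until the whole [pfn, pfn + pages - 1]
 * range is covered. */
static unsigned int psi_mask(unsigned long pfn, unsigned int pages)
{
	unsigned int aligned_pages = pages == 1 ? 1 :
		1u << (32 - __builtin_clz(pages - 1));	/* __roundup_pow_of_two() */
	unsigned long bitmask = aligned_pages - 1;
	unsigned int mask = __builtin_ctz(aligned_pages);	/* ilog2() of a power of two */

	if (bitmask & pfn) {
		unsigned long end_pfn = pfn + pages - 1;
		unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

		/* Bits above the lowest set bit of shared_bits are identical
		 * in pfn and end_pfn, so a mask of that width covers the
		 * whole range. */
		mask = shared_bits ? __builtin_ctzl(shared_bits) : BITS_PER_LONG;
	}
	return mask;
}

int main(void)
{
	printf("%u\n", psi_mask(0x1000, 8));	/* aligned base: mask 3 (8 pages) */
	printf("%u\n", psi_mask(0x1003, 8));	/* unaligned base: widened to mask 4 */
	return 0;
}
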
@@ -1526,42 +1497,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
- if (domain->use_first_level) {
+ if (domain->use_first_level)
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
- } else {
- unsigned long bitmask = aligned_pages - 1;
-
- /*
- * PSI masks the low order bits of the base address. If the
- * address isn't aligned to the mask, then compute a mask value
- * needed to ensure the target range is flushed.
- */
- if (unlikely(bitmask & pfn)) {
- unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
- /*
- * Since end_pfn <= pfn + bitmask, the only way bits
- * higher than bitmask can differ in pfn and end_pfn is
- * by carrying. This means after masking out bitmask,
- * high bits starting with the first set bit in
- * shared_bits are all equal in both pfn and end_pfn.
- */
- shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
- mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
- }
-
- /*
- * Fallback to domain selective flush if no PSI support or
- * the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ else
+ __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
/*
* In caching mode, changes of pages from non-present to present require
@@ -1572,9 +1511,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
}
/* Notification for newly created mappings */
-static inline void __mapping_notify_one(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages)
+static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages)
{
/*
* It's a non-present to present mapping. Only flush if caching mode
@@ -1586,6 +1524,46 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
iommu_flush_write_buffer(iommu);
}
+/*
+ * Flush the relevant caches in nested translation if the domain
+ * also serves as a parent
+ */
+static void parent_domain_flush(struct dmar_domain *domain,
+ unsigned long pfn,
+ unsigned long pages, int ih)
+{
+ struct dmar_domain *s1_domain;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ struct device_domain_info *device_info;
+ struct iommu_domain_info *info;
+ unsigned long flags;
+ unsigned long i;
+
+ xa_for_each(&s1_domain->iommu_array, i, info)
+ __iommu_flush_iotlb_psi(info->iommu, info->did,
+ pfn, pages, ih);
+
+ if (!s1_domain->has_iotlb_device)
+ continue;
+
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ list_for_each_entry(device_info, &s1_domain->devices, link)
+ /*
+ * Address translation cache in device side caches the
+ * result of nested translation. There is no easy way
+ * to identify the exact set of nested translations
+ * affected by a change in S2. So just flush the entire
+ * device cache.
+ */
+ __iommu_flush_dev_iotlb(device_info, 0,
+ MAX_AGAW_PFN_WIDTH);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+}
+
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
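
[Editor's note] The comment in parent_domain_flush() about flushing the entire device cache follows from how nested caching works: a cached nested translation is the combined result of the first-stage (S1) and second-stage (S2) walks, and nothing records which S2 entries contributed to it. A toy standalone C model (illustration only, not kernel code) of why an S2 change invalidates everything for domains nested on that parent:

#include <stdbool.h>
#include <stdio.h>

#define ENTRIES 4

static unsigned long s1[ENTRIES] = { 0, 1, 2, 3 };	/* guest IOVA -> GPA */
static unsigned long s2[ENTRIES] = { 10, 11, 12, 13 };	/* GPA -> HPA (parent domain) */

static struct cached { bool valid; unsigned long hpa; } cache[ENTRIES];

/* A "nested walk": the cache stores only the end-to-end result. */
static unsigned long translate(unsigned long iova)
{
	if (!cache[iova].valid)
		cache[iova] = (struct cached){ true, s2[s1[iova]] };
	return cache[iova].hpa;
}

int main(void)
{
	translate(1);				/* caches HPA 11 */
	s2[1] = 99;				/* parent (S2) mapping changes */
	printf("stale: %lu\n", translate(1));	/* still 11 without a flush */

	/* parent_domain_flush() analog: drop everything, since the cache
	 * cannot tell which entries depended on s2[1]. */
	for (int i = 0; i < ENTRIES; i++)
		cache[i].valid = false;

	printf("fresh: %lu\n", translate(1));	/* now 99 */
	return 0;
}
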
@@ -1605,6 +1583,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
+
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, 0, -1, 0);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1841,7 +1822,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
spin_unlock(&iommu->lock);
}
-static inline int guestwidth_to_adjustwidth(int gaw)
+static int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
int r = (gaw - 12) % 9;
@@ -1875,7 +1856,7 @@ static void domain_exit(struct dmar_domain *domain)
* Value of X in the PDTS field of a scalable mode context entry
* indicates PASID directory with 2^(X + 7) entries.
*/
-static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+static unsigned long context_get_sm_pds(struct pasid_table *table)
{
unsigned long pds, max_pde;
@@ -1887,38 +1868,6 @@ static inline unsigned long context_get_sm_pds(struct pasid_table *table)
return pds - 7;
}
-/*
- * Set the RID_PASID field of a scalable mode context entry. The
- * IOMMU hardware will use the PASID value set in this field for
- * DMA translations of DMA requests without PASID.
- */
-static inline void
-context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
-{
- context->hi |= pasid & ((1 << 20) - 1);
-}
-
-/*
- * Set the DTE(Device-TLB Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_dte(struct context_entry *context)
-{
- context->lo |= BIT_ULL(2);
-}
-
-/*
- * Set the PRE(Page Request Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_pre(struct context_entry *context)
-{
- context->lo |= BIT_ULL(4);
-}
-
-/* Convert value to context PASID directory size field coding. */
-#define context_pdts(pds) (((pds) & 0x7) << 9)
-
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
struct pasid_table *table,
@@ -2079,14 +2028,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct domain_context_mapping_data data;
+ struct intel_iommu *iommu = info->iommu;
+ u8 bus = info->bus, devfn = info->devfn;
struct pasid_table *table;
- struct intel_iommu *iommu;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
table = intel_pasid_get_table(dev);
@@ -2103,18 +2049,15 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
}
/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
+static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
host_addr &= ~PAGE_MASK;
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
/* Return largest possible superpage level for a given mapping */
-static inline int hardware_largepage_caps(struct dmar_domain *domain,
- unsigned long iov_pfn,
- unsigned long phy_pfn,
- unsigned long pages)
+static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phy_pfn, unsigned long pages)
{
int support, level = 1;
unsigned long pfnmerge;
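
[Editor's note] aligned_nrpages() turns a host virtual address plus byte length into a count of VT-d pages, rounded out to MM page boundaries on both ends. A standalone sketch (illustration only) under the assumption that MM pages and VT-d pages are both 4 KiB, as on x86; on a configuration with a larger PAGE_SIZE the shifts would differ.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT	12	/* VT-d second-level pages are 4 KiB */

/* Same computation as the patched aligned_nrpages(): keep only the
 * offset of the host address inside its page, round offset + size up
 * to a page boundary, and express the result in VT-d pages. */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* 8 KiB starting exactly on a page boundary -> 2 pages. */
	printf("%lu\n", aligned_nrpages(0x10000, 8192));
	/* 8 KiB starting 16 bytes into a page -> spans 3 pages. */
	printf("%lu\n", aligned_nrpages(0x10010, 8192));
	return 0;
}
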
@@ -2166,6 +2109,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
+ if (domain->nested_parent)
+ parent_domain_flush(domain, start_pfn,
+ lvl_pages, 0);
}
pte++;
@@ -2447,15 +2393,10 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = info->iommu;
unsigned long flags;
- u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
-
ret = domain_attach_iommu(domain, iommu);
if (ret)
return ret;
@@ -2468,7 +2409,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu, domain,
+ ret = intel_pasid_setup_pass_through(iommu,
dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev,
@@ -3613,7 +3554,7 @@ void intel_iommu_shutdown(void)
up_write(&dmar_global_lock);
}
-static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
@@ -3692,7 +3633,7 @@ const struct attribute_group *intel_iommu_groups[] = {
NULL,
};
-static inline bool has_external_pci(void)
+static bool has_external_pci(void)
{
struct pci_dev *pdev = NULL;
@@ -3933,30 +3874,6 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
- struct intel_iommu *iommu = info->iommu;
- unsigned long flags;
-
- if (!dev_is_real_dma_subdevice(info->dev)) {
- if (dev_is_pci(info->dev) && sm_supported(iommu))
- intel_pasid_tear_down_entry(iommu, info->dev,
- IOMMU_NO_PASID, false);
-
- iommu_disable_pci_caps(info);
- domain_context_clear(info);
- }
-
- spin_lock_irqsave(&domain->lock, flags);
- list_del(&info->link);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- domain_detach_iommu(domain, iommu);
- info->domain = NULL;
-}
-
/*
* Clear the page table pointer in context or pasid table entries so that
* all DMA requests without PASID from the device are blocked. If the page
@@ -4127,14 +4044,11 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
int prepare_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = info->iommu;
int addr_width;
- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return -ENODEV;
-
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EINVAL;
@@ -4306,6 +4220,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, start_pfn, nrpages,
+ list_empty(&gather->freelist));
put_pages_list(&gather->freelist);
}
@@ -4411,7 +4328,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_lookup_iommu(dev, &bus, &devfn);
if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);
@@ -4464,30 +4381,47 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
+ if (pdev && pci_ats_supported(pdev)) {
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
+ }
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
- dev_iommu_priv_set(dev, NULL);
- kfree(info);
- return ERR_PTR(ret);
+ goto clear_rbtree;
}
}
intel_iommu_debugfs_create_dev(info);
return &iommu->iommu;
+clear_rbtree:
+ device_rbtree_remove(info);
+free:
+ kfree(info);
+
+ return ERR_PTR(ret);
}
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+ mutex_lock(&iommu->iopf_lock);
+ if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+ device_rbtree_remove(info);
+ mutex_unlock(&iommu->iopf_lock);
+
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ !context_copied(iommu, info->bus, info->devfn))
+ intel_pasid_teardown_sm_context(dev);
- dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
- dev_iommu_priv_set(dev, NULL);
kfree(info);
set_dma_ops(dev, NULL);
}
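
[Editor's note] The reworked error path in intel_iommu_probe_device() follows the usual goto-unwind convention: a later failure jumps to a label that undoes only the steps already completed, in reverse order (clear_rbtree, then free). A toy standalone C sketch (illustration only, with made-up helpers tree_insert()/alloc_table()) of the same shape:

#include <errno.h>
#include <stdlib.h>

struct dev_info { int in_tree; };

static int tree_insert(struct dev_info *info) { info->in_tree = 1; return 0; }
static void tree_remove(struct dev_info *info) { info->in_tree = 0; }
static int alloc_table(void) { return -ENOMEM; /* pretend this step fails */ }

static int probe(void)
{
	struct dev_info *info = calloc(1, sizeof(*info));
	int ret;

	if (!info)
		return -ENOMEM;

	ret = tree_insert(info);
	if (ret)
		goto free;

	ret = alloc_table();
	if (ret)
		goto clear_tree;	/* undo the insert, then the allocation */

	return 0;

clear_tree:
	tree_remove(info);
free:
	free(info);
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
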
@@ -4747,8 +4681,9 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dev_pasid_info *curr, *dev_pasid = NULL;
+ struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
unsigned long flags;
@@ -4819,8 +4754,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
goto out_free;
if (domain_type_is_si(dmar_domain))
- ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
- dev, pasid);
+ ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
else if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid);
@@ -4991,6 +4925,7 @@ static const struct iommu_dirty_ops intel_dirty_ops = {
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
+ .release_domain = &blocking_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,