author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:37 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:37 +0000
commit    94ac2ab3fff96814d7460a27a0e9d004abbd4128 (patch)
tree      9a4eb8cc234b540b0f4b93363109cdd37a20540b /drivers/iommu/amd/iommu.c
parent    Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r--  drivers/iommu/amd/iommu.c | 645
1 file changed, 298 insertions(+), 347 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f945bf3253..fb727f5b0b 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -45,10 +45,6 @@
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-
/* Reserved IOVA ranges */
#define MSI_RANGE_START (0xfee00000)
#define MSI_RANGE_END (0xfeefffff)
@@ -79,6 +75,9 @@ struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data);
+
/****************************************************************************
*
* Helper functions
@@ -87,7 +86,7 @@ static void detach_device(struct device *dev);
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
- return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+ return (pdom && (pdom->pd_mode == PD_MODE_V2));
}
static inline int get_acpihid_device_id(struct device *dev,
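The flag-mask test above is replaced by an explicit page-table mode. A hedged sketch of the enum this relies on (the actual definition sits in amd_iommu_types.h and is not part of this hunk):

/* Sketch only: replaces the old PD_IOMMUV2_MASK / PD_GIOV_MASK flag bits. */
enum protection_domain_mode {
	PD_MODE_V1 = 1,		/* host (v1) page table */
	PD_MODE_V2,		/* guest (v2) page table with GCR3/GIOV */
};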
@@ -1388,14 +1387,9 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
size_t size, ioasid_t pasid, bool gn)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct iommu_cmd cmd;
- int qdep;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
+ int qdep = dev_data->ats_qdep;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
size, pasid, gn);
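The rlookup_amd_iommu() plus NULL-check pattern removed throughout this patch gives way to per-device lookup helpers. A hedged sketch of what such helpers might look like; the real definitions live in the AMD IOMMU headers and may differ:

/*
 * Sketch only. The point of the conversion is that the lookup cannot fail
 * for a probed device, so callers drop the old "if (!iommu)" error paths.
 */
static inline struct amd_iommu *get_amd_iommu_from_dev(struct device *dev)
{
	/* Assumes the core-registered iommu_device is embedded in struct amd_iommu */
	return container_of(dev->iommu->iommu_dev, struct amd_iommu, iommu);
}

static inline struct amd_iommu *
get_amd_iommu_from_dev_data(struct iommu_dev_data *dev_data)
{
	return get_amd_iommu_from_dev(dev_data->dev);
}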
@@ -1415,16 +1409,12 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
*/
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct pci_dev *pdev = NULL;
struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
-
if (dev_is_pci(dev_data->dev))
pdev = to_pci_dev(dev_data->dev);
@@ -1453,27 +1443,37 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
-/*
- * TLB invalidation function which is called from the mapping functions.
- * It invalidates a single PTE if the range to flush is within a single
- * page. Otherwise it flushes the whole TLB of the IOMMU.
- */
-static void __domain_flush_pages(struct protection_domain *domain,
+static int domain_flush_pages_v2(struct protection_domain *pdom,
u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
- int ret = 0, i;
- ioasid_t pasid = IOMMU_NO_PASID;
- bool gn = false;
+ int ret = 0;
- if (pdom_is_v2_pgtbl_mode(domain))
- gn = true;
+ list_for_each_entry(dev_data, &pdom->dev_list, list) {
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+ u16 domid = dev_data->gcr3_info.domid;
+
+ build_inv_iommu_pages(&cmd, address, size,
+ domid, IOMMU_NO_PASID, true);
+
+ ret |= iommu_queue_command(iommu, &cmd);
+ }
+
+ return ret;
+}
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
+static int domain_flush_pages_v1(struct protection_domain *pdom,
+ u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ int ret = 0, i;
+
+ build_inv_iommu_pages(&cmd, address, size,
+ pdom->id, IOMMU_NO_PASID, false);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (!domain->dev_iommu[i])
+ if (!pdom->dev_iommu[i])
continue;
/*
@@ -1483,6 +1483,28 @@ static void __domain_flush_pages(struct protection_domain *domain,
ret |= iommu_queue_command(amd_iommus[i], &cmd);
}
+ return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It flushes range of PTEs of the domain.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
+{
+ struct iommu_dev_data *dev_data;
+ int ret = 0;
+ ioasid_t pasid = IOMMU_NO_PASID;
+ bool gn = false;
+
+ if (pdom_is_v2_pgtbl_mode(domain)) {
+ gn = true;
+ ret = domain_flush_pages_v2(domain, address, size);
+ } else {
+ ret = domain_flush_pages_v1(domain, address, size);
+ }
+
list_for_each_entry(dev_data, &domain->dev_list, list) {
if (!dev_data->ats_enabled)
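The v2 flush path above keys the invalidation on a per-device domain ID carried in dev_data->gcr3_info. A sketch of that bookkeeping structure as implied by the fields used in this patch (the actual definition is in amd_iommu_types.h):

struct gcr3_tbl_info {
	u64	*gcr3_tbl;	/* root of the per-device GCR3 table */
	int	glx;		/* GCR3 table level count (GLX field) */
	u32	pasid_cnt;	/* PASIDs currently installed in the table */
	u16	domid;		/* per-device domain ID used for flushes */
};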
@@ -1551,6 +1573,29 @@ static void amd_iommu_domain_flush_all(struct protection_domain *domain)
CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+
+ build_inv_iommu_pages(&cmd, address, size,
+ dev_data->gcr3_info.domid, pasid, true);
+ iommu_queue_command(iommu, &cmd);
+
+ if (dev_data->ats_enabled)
+ device_flush_iotlb(dev_data, address, size, pasid, true);
+
+ iommu_completion_wait(iommu);
+}
+
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid)
+{
+ amd_iommu_dev_flush_pasid_pages(dev_data, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid);
+}
+
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{
int i;
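As a usage illustration only (the real callers sit in the PASID/SVA paths outside this file), flushing translations for one PASID on a device might look like:

/* Illustrative only: flush one page, then everything, for a given PASID. */
static void example_flush_pasid(struct iommu_dev_data *dev_data,
				ioasid_t pasid, u64 iova)
{
	/* Invalidate a single page of the PASID's address space */
	amd_iommu_dev_flush_pasid_pages(dev_data, pasid, iova, PAGE_SIZE);

	/* Or drop every translation cached for that PASID */
	amd_iommu_dev_flush_pasid_all(dev_data, pasid);
}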
@@ -1592,6 +1637,49 @@ static void domain_flush_devices(struct protection_domain *domain)
device_flush_dte(dev_data);
}
+static void update_device_table(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ set_dte_entry(iommu, dev_data);
+ clone_aliases(iommu, dev_data->dev);
+ }
+}
+
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+{
+ update_device_table(domain);
+ domain_flush_devices(domain);
+}
+
+void amd_iommu_domain_update(struct protection_domain *domain)
+{
+ /* Update device table */
+ amd_iommu_update_and_flush_device_table(domain);
+
+ /* Flush domain TLB(s) and wait for completion */
+ amd_iommu_domain_flush_all(domain);
+}
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ iommu = get_amd_iommu_from_dev(&pdev->dev);
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
/****************************************************************************
*
* The next functions belong to the domain allocation. A domain is
@@ -1604,26 +1692,29 @@ static void domain_flush_devices(struct protection_domain *domain)
static u16 domain_id_alloc(void)
{
+ unsigned long flags;
int id;
- spin_lock(&pd_bitmap_lock);
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
return id;
}
static void domain_id_free(int id)
{
- spin_lock(&pd_bitmap_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
}
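The plain spin_lock() calls become IRQ-safe because the ID allocator is now reached from sections that may already run with interrupts disabled; a hedged sketch of the assumed call chain:

/*
 * Assumed nesting (function names from this file), shown for illustration:
 *
 *   attach_device()                   takes domain/dev_data locks (IRQs off)
 *     do_attach()
 *       setup_gcr3_table()
 *         domain_id_alloc()           takes pd_bitmap_lock via _irqsave
 *
 * Saving and restoring the IRQ state keeps the bitmap lock correct whether
 * or not the caller already disabled interrupts.
 */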
static void free_gcr3_tbl_level1(u64 *tbl)
@@ -1656,16 +1747,22 @@ static void free_gcr3_tbl_level2(u64 *tbl)
}
}
-static void free_gcr3_table(struct protection_domain *domain)
+static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
{
- if (domain->glx == 2)
- free_gcr3_tbl_level2(domain->gcr3_tbl);
- else if (domain->glx == 1)
- free_gcr3_tbl_level1(domain->gcr3_tbl);
+ if (gcr3_info->glx == 2)
+ free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
+ else if (gcr3_info->glx == 1)
+ free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
else
- BUG_ON(domain->glx != 0);
+ WARN_ON_ONCE(gcr3_info->glx != 0);
+
+ gcr3_info->glx = 0;
- free_page((unsigned long)domain->gcr3_tbl);
+ /* Free per device domain ID */
+ domain_id_free(gcr3_info->domid);
+
+ free_page((unsigned long)gcr3_info->gcr3_tbl);
+ gcr3_info->gcr3_tbl = NULL;
}
/*
@@ -1684,33 +1781,133 @@ static int get_gcr3_levels(int pasids)
return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
}
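A worked example of the level computation, assuming (as in the existing helper body) that `levels` holds the number of PASID bits being requested:

/*
 * Illustrative values only:
 *   1 PASID     -> 0 bits  -> returns 0 (single-level GCR3 table)
 *   2^16 PASIDs -> 16 bits -> DIV_ROUND_UP(16, 9) - 1 = 1 (two levels)
 *   2^20 PASIDs -> 20 bits -> DIV_ROUND_UP(20, 9) - 1 = 2 (three levels)
 * Each GCR3 table level resolves 9 bits of the PASID (512 entries).
 */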
-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
+ struct amd_iommu *iommu, int pasids)
{
int levels = get_gcr3_levels(pasids);
+ int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
if (levels > amd_iommu_max_glx_val)
return -EINVAL;
- domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
+ if (gcr3_info->gcr3_tbl)
+ return -EBUSY;
+
+ /* Allocate per device domain ID */
+ gcr3_info->domid = domain_id_alloc();
+
+ gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
+ if (gcr3_info->gcr3_tbl == NULL) {
+ domain_id_free(gcr3_info->domid);
return -ENOMEM;
+ }
+
+ gcr3_info->glx = levels;
+
+ return 0;
+}
+
+static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
+ ioasid_t pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+ u64 *root = gcr3_info->gcr3_tbl;
+ int level = gcr3_info->glx;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = iommu_virt_to_phys(root) | GCR3_VALID;
+ }
+
+ root = iommu_phys_to_virt(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
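For clarity, the walk above consumes 9 bits of the PASID per level, top level first. A worked example for a two-level table (glx = 1):

/*
 * Illustrative only: glx = 1, pasid = 0x12345.
 *   level 1: index = (0x12345 >> 9) & 0x1ff = 0x91  -> next-level table
 *   level 0: index =  0x12345       & 0x1ff = 0x145 -> leaf GCR3 entry
 * The leaf entry then receives (gcr3 & PAGE_MASK) | GCR3_VALID.
 */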
+
+static int update_gcr3(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, unsigned long gcr3, bool set)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ u64 *pte;
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
+ pte = __get_gcr3_pte(gcr3_info, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
- amd_iommu_domain_update(domain);
+ if (set)
+ *pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
+ else
+ *pte = 0;
+ amd_iommu_dev_flush_pasid_all(dev_data, pasid);
return 0;
}
-static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
- struct protection_domain *domain, bool ats, bool ppr)
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
+ unsigned long gcr3)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, gcr3, true);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt++;
+ return ret;
+}
+
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, 0, false);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt--;
+ return ret;
+}
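As an illustration of the new per-device interface (the real callers are in the PASID/SVA paths, outside this file), binding and later unbinding a guest CR3 for a PASID might look like the sketch below; everything except amd_iommu_set_gcr3()/amd_iommu_clear_gcr3() is made up for the example:

/* Illustrative only: caller is expected to hold the device's group mutex,
 * matching the iommu_group_mutex_assert() checks above. */
static int example_bind_pasid(struct iommu_dev_data *dev_data,
			      ioasid_t pasid, unsigned long guest_cr3)
{
	int ret;

	/* Install the guest page-table root for this PASID ... */
	ret = amd_iommu_set_gcr3(dev_data, pasid, guest_cr3);
	if (ret)
		return ret;

	/* ... and later tear it down again. */
	return amd_iommu_clear_gcr3(dev_data, pasid);
}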
+
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ u16 devid = dev_data->devid;
+ u16 domid;
+ struct protection_domain *domain = dev_data->domain;
struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+
+ if (gcr3_info && gcr3_info->gcr3_tbl)
+ domid = dev_data->gcr3_info.domid;
+ else
+ domid = domain->id;
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
@@ -1724,23 +1921,23 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
* When SNP is enabled, Only set TV bit when IOMMU
* page translation is in use.
*/
- if (!amd_iommu_snp_en || (domain->id != 0))
+ if (!amd_iommu_snp_en || (domid != 0))
pte_root |= DTE_FLAG_TV;
flags = dev_table[devid].data[1];
- if (ats)
+ if (dev_data->ats_enabled)
flags |= DTE_FLAG_IOTLB;
- if (ppr)
+ if (dev_data->ppr)
pte_root |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
pte_root |= DTE_FLAG_HAD;
- if (domain->flags & PD_IOMMUV2_MASK) {
- u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
- u64 glx = domain->glx;
+ if (gcr3_info && gcr3_info->gcr3_tbl) {
+ u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+ u64 glx = gcr3_info->glx;
u64 tmp;
pte_root |= DTE_FLAG_GV;
@@ -1768,12 +1965,13 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
}
- if (domain->flags & PD_GIOV_MASK)
+ /* GIOV is supported with V2 page table mode only */
+ if (pdom_is_v2_pgtbl_mode(domain))
pte_root |= DTE_FLAG_GIOV;
}
flags &= ~DEV_DOMID_MASK;
- flags |= domain->id;
+ flags |= domid;
old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
dev_table[devid].data[1] = flags;
@@ -1804,16 +2002,11 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
amd_iommu_apply_erratum_63(iommu, devid);
}
-static void do_attach(struct iommu_dev_data *dev_data,
- struct protection_domain *domain)
+static int do_attach(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
{
- struct amd_iommu *iommu;
- bool ats;
-
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
- ats = dev_data->ats_enabled;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ int ret = 0;
/* Update data structures */
dev_data->domain = domain;
@@ -1827,22 +2020,40 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
+ /* Init GCR3 table and update device table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ /* By default, setup GCR3 table to support single PASID */
+ ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
+ if (ret)
+ return ret;
+
+ ret = update_gcr3(dev_data, 0,
+ iommu_virt_to_phys(domain->iop.pgd), true);
+ if (ret) {
+ free_gcr3_table(&dev_data->gcr3_info);
+ return ret;
+ }
+ }
+
/* Update device table */
- set_dte_entry(iommu, dev_data->devid, domain,
- ats, dev_data->ppr);
+ set_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
+
+ return ret;
}
static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
+ /* Clear GCR3 table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ update_gcr3(dev_data, 0, 0, false);
+ free_gcr3_table(&dev_data->gcr3_info);
+ }
/* Update data structures */
dev_data->domain = NULL;
@@ -1886,7 +2097,7 @@ static int attach_device(struct device *dev,
if (dev_is_pci(dev))
pdev_enable_caps(to_pci_dev(dev));
- do_attach(dev_data, domain);
+ ret = do_attach(dev_data, domain);
out:
spin_unlock(&dev_data->lock);
@@ -1954,8 +2165,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
ret = iommu_init_device(iommu, dev);
if (ret) {
- if (ret != -ENOTSUPP)
- dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+ dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
iommu_ignore_device(iommu, dev);
} else {
@@ -2000,42 +2210,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
/*****************************************************************************
*
- * The next functions belong to the dma_ops mapping/unmapping code.
- *
- *****************************************************************************/
-
-static void update_device_table(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
-
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
-
- if (!iommu)
- continue;
- set_dte_entry(iommu, dev_data->devid, domain,
- dev_data->ats_enabled, dev_data->ppr);
- clone_aliases(iommu, dev_data->dev);
- }
-}
-
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
-{
- update_device_table(domain);
- domain_flush_devices(domain);
-}
-
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
- /* Update device table */
- amd_iommu_update_and_flush_device_table(domain);
-
- /* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_all(domain);
-}
-
-/*****************************************************************************
- *
* The following functions belong to the exported interface of AMD IOMMU
*
* This interface allows access to lower level functions of the IOMMU
@@ -2070,9 +2244,6 @@ static void protection_domain_free(struct protection_domain *domain)
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
-
if (domain->iop.root)
free_page((unsigned long)domain->iop.root);
@@ -2094,19 +2265,16 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return -ENOMEM;
}
+ domain->pd_mode = PD_MODE_V1;
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
return 0;
}
-static int protection_domain_init_v2(struct protection_domain *domain)
+static int protection_domain_init_v2(struct protection_domain *pdom)
{
- domain->flags |= PD_GIOV_MASK;
-
- domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
- if (setup_gcr3_table(domain, 1))
- return -ENOMEM;
+ pdom->pd_mode = PD_MODE_V2;
+ pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
return 0;
}
@@ -2194,11 +2362,8 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
struct protection_domain *domain;
struct amd_iommu *iommu = NULL;
- if (dev) {
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return ERR_PTR(-ENODEV);
- }
+ if (dev)
+ iommu = get_amd_iommu_from_dev(dev);
/*
* Since DTE[Mode]=0 is prohibited on SNP-enabled system,
@@ -2279,7 +2444,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
int ret;
/*
@@ -2337,7 +2502,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
int prot = 0;
int ret = -EINVAL;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return -EINVAL;
@@ -2383,7 +2548,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
size_t r;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
@@ -2418,7 +2583,7 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
case IOMMU_CAP_DEFERRED_FLUSH:
return true;
case IOMMU_CAP_DIRTY_TRACKING: {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
return amd_iommu_hd_support(iommu);
}
@@ -2447,9 +2612,7 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
+ iommu = get_amd_iommu_from_dev_data(dev_data);
dev_table = get_dev_table(iommu);
pte_root = dev_table[dev_data->devid].data[0];
@@ -2509,9 +2672,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
devid = PCI_SBDF_TO_DEVID(sbdf);
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return;
+ iommu = get_amd_iommu_from_dev(dev);
pci_seg = iommu->pci_seg;
list_for_each_entry(entry, &pci_seg->unity_map, list) {
@@ -2649,216 +2810,6 @@ const struct iommu_ops amd_iommu_ops = {
}
};
-static int __flush_pasid(struct protection_domain *domain, u32 pasid,
- u64 address, size_t size)
-{
- struct iommu_dev_data *dev_data;
- struct iommu_cmd cmd;
- int i, ret;
-
- if (!(domain->flags & PD_IOMMUV2_MASK))
- return -EINVAL;
-
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
-
- /*
- * IOMMU TLB needs to be flushed before Device TLB to
- * prevent device TLB refill from IOMMU TLB
- */
- for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain->dev_iommu[i] == 0)
- continue;
-
- ret = iommu_queue_command(amd_iommus[i], &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until IOMMU TLB flushes are complete */
- amd_iommu_domain_flush_complete(domain);
-
- /* Now flush device TLBs */
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu;
- int qdep;
-
- /*
- There might be non-IOMMUv2 capable devices in an IOMMUv2
- * domain.
- */
- if (!dev_data->ats_enabled)
- continue;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
- build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
- address, size, pasid, true);
-
- ret = iommu_queue_command(iommu, &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until all device TLBs are flushed */
- amd_iommu_domain_flush_complete(domain);
-
- ret = 0;
-
-out:
-
- return ret;
-}
-
-static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
- u64 address)
-{
- return __flush_pasid(domain, pasid, address, PAGE_SIZE);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
- u64 address)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_page(domain, pasid, address);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
-{
- return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_tlb(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
-{
- int index;
- u64 *pte;
-
- while (true) {
-
- index = (pasid >> (9 * level)) & 0x1ff;
- pte = &root[index];
-
- if (level == 0)
- break;
-
- if (!(*pte & GCR3_VALID)) {
- if (!alloc)
- return NULL;
-
- root = (void *)get_zeroed_page(GFP_ATOMIC);
- if (root == NULL)
- return NULL;
-
- *pte = iommu_virt_to_phys(root) | GCR3_VALID;
- }
-
- root = iommu_phys_to_virt(*pte & PAGE_MASK);
-
- level -= 1;
- }
-
- return pte;
-}
-
-static int __set_gcr3(struct protection_domain *domain, u32 pasid,
- unsigned long cr3)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
- if (pte == NULL)
- return -ENOMEM;
-
- *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
- if (pte == NULL)
- return 0;
-
- *pte = 0;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
- unsigned long cr3)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __set_gcr3(domain, pasid, cr3);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __clear_gcr3(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
- int status, int tag)
-{
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- struct iommu_cmd cmd;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = rlookup_amd_iommu(&pdev->dev);
- if (!iommu)
- return -ENODEV;
-
- build_complete_ppr(&cmd, dev_data->devid, pasid, status,
- tag, dev_data->pri_tlp);
-
- return iommu_queue_command(iommu, &cmd);
-}
-
#ifdef CONFIG_IRQ_REMAP
/*****************************************************************************