Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig                            |   6
-rw-r--r--  drivers/iommu/Makefile                           |   3
-rw-r--r--  drivers/iommu/amd/amd_iommu.h                    |  42
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h              |  34
-rw-r--r--  drivers/iommu/amd/init.c                         | 146
-rw-r--r--  drivers/iommu/amd/io_pgtable_v2.c                |  21
-rw-r--r--  drivers/iommu/amd/iommu.c                        | 645
-rw-r--r--  drivers/iommu/apple-dart.c                       |   3
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c  |  22
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c      | 811
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h      |   6
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c       |   1
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c            |   3
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c          |   3
-rw-r--r--  drivers/iommu/dma-iommu.c                        |   5
-rw-r--r--  drivers/iommu/exynos-iommu.c                     |   2
-rw-r--r--  drivers/iommu/intel/Kconfig                      |  14
-rw-r--r--  drivers/iommu/intel/iommu.c                      | 234
-rw-r--r--  drivers/iommu/intel/iommu.h                      |   4
-rw-r--r--  drivers/iommu/intel/pasid.c                      | 138
-rw-r--r--  drivers/iommu/intel/pasid.h                      |   1
-rw-r--r--  drivers/iommu/intel/perf.c                       |   2
-rw-r--r--  drivers/iommu/intel/svm.c                        |  63
-rw-r--r--  drivers/iommu/io-pgfault.c                       | 463
-rw-r--r--  drivers/iommu/iommu-priv.h                       |   5
-rw-r--r--  drivers/iommu/iommu-sva.c                        |  71
-rw-r--r--  drivers/iommu/iommu-sva.h                        |  71
-rw-r--r--  drivers/iommu/iommu.c                            | 261
-rw-r--r--  drivers/iommu/iova.c                             | 143
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c                       |  19
-rw-r--r--  drivers/iommu/msm_iommu.c                        |   4
-rw-r--r--  drivers/iommu/mtk_iommu.c                        |   5
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c                     |   7
-rw-r--r--  drivers/iommu/of_iommu.c                         |   2
-rw-r--r--  drivers/iommu/rockchip-iommu.c                   |   2
-rw-r--r--  drivers/iommu/sprd-iommu.c                       |   3
-rw-r--r--  drivers/iommu/sun50i-iommu.c                     |   2
-rw-r--r--  drivers/iommu/tegra-smmu.c                       |   4
-rw-r--r--  drivers/iommu/virtio-iommu.c                     |   3
39 files changed, 1686 insertions, 1588 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 9dbb55e745..0af39bbbe3 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -163,6 +163,9 @@ config IOMMU_SVA
select IOMMU_MM_DATA
bool
+config IOMMU_IOPF
+ bool
+
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PCI
@@ -179,7 +182,7 @@ config FSL_PAMU
config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
- depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
select IOMMU_API
select IOMMU_IO_PGTABLE_ARMV7S
help
@@ -398,6 +401,7 @@ config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
select IOMMU_SVA
+ select IOMMU_IOPF
select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 95ad9dbfbd..542760d963 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o
+obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 8b3601f285..f482aab420 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -39,20 +39,16 @@ extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
bool amd_iommu_v2_supported(void);
-struct amd_iommu *get_amd_iommu(unsigned int idx);
-u8 amd_iommu_pc_get_max_banks(unsigned int idx);
-bool amd_iommu_pc_supported(void);
-u8 amd_iommu_pc_get_max_counters(unsigned int idx);
-int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
-int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
/* Device capabilities */
int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
+/* GCR3 setup */
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, unsigned long gcr3);
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
+
/*
* This function flushes all internal caches of
* the IOMMU used by this driver.
@@ -63,10 +59,10 @@ void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
- unsigned long cr3);
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, u64 address, size_t size);
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid);
#ifdef CONFIG_IRQ_REMAP
int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
@@ -77,10 +73,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
}
#endif
-#define PPR_SUCCESS 0x0
-#define PPR_INVALID 0x1
-#define PPR_FAILURE 0xf
-
int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag);
@@ -150,6 +142,21 @@ static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
return page ? page_address(page) : NULL;
}
+/*
+ * This must be called after device probe completes. During probe
+ * use rlookup_amd_iommu() to get the iommu.
+ */
+static inline struct amd_iommu *get_amd_iommu_from_dev(struct device *dev)
+{
+ return iommu_get_iommu_dev(dev, struct amd_iommu, iommu);
+}
+
+/* This must be called after device probe completes. */
+static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_data *dev_data)
+{
+ return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
+}
+
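A minimal sketch (not part of this patch) of how a caller would pick between the two lookup paths described in the comment above; the wrapper name is hypothetical, while rlookup_amd_iommu() and get_amd_iommu_from_dev() are the real helpers:

	/* Illustrative only: probe-time vs. post-probe IOMMU lookup. */
	static struct amd_iommu *example_lookup_iommu(struct device *dev, bool probed)
	{
		if (!probed)
			return rlookup_amd_iommu(dev);	/* probe-time path */
		return get_amd_iommu_from_dev(dev);	/* post-probe path */
	}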
bool translation_pre_enabled(struct amd_iommu *iommu);
bool amd_iommu_is_attach_deferred(struct device *dev);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
@@ -164,5 +171,4 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
-extern bool amd_iommu_snp_en;
#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 809d74faa1..d1fed5fc21 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -453,15 +453,6 @@
#define MAX_DOMAIN_ID 65536
-/* Protection domain flags */
-#define PD_DMA_OPS_MASK BIT(0) /* domain used for dma_ops */
-#define PD_DEFAULT_MASK BIT(1) /* domain is a default dma_ops
- domain for an IOMMU */
-#define PD_PASSTHROUGH_MASK BIT(2) /* domain has no page
- translation */
-#define PD_IOMMUV2_MASK BIT(3) /* domain has gcr3 table */
-#define PD_GIOV_MASK BIT(4) /* domain enable GIOV support */
-
/* Timeout stuff */
#define LOOP_TIMEOUT 100000
#define MMIO_STATUS_TIMEOUT 2000000
@@ -513,14 +504,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define for_each_iommu_safe(iommu, next) \
list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
-#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
-#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
-#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
-#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
-#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
-#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-
-
struct amd_iommu;
struct iommu_domain;
struct irq_domain;
@@ -541,6 +524,13 @@ struct amd_irte_ops;
#define io_pgtable_cfg_to_data(x) \
container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+struct gcr3_tbl_info {
+ u64 *gcr3_tbl; /* Guest CR3 table */
+ int glx; /* Number of levels for GCR3 table */
+ u32 pasid_cnt; /* Track attached PASIDs */
+ u16 domid; /* Per device domain ID */
+};
+
struct amd_io_pgtable {
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable iop;
@@ -549,6 +539,11 @@ struct amd_io_pgtable {
u64 *pgd; /* v2 pgtable pgd pointer */
};
+enum protection_domain_mode {
+ PD_MODE_V1 = 1,
+ PD_MODE_V2,
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -560,10 +555,8 @@ struct protection_domain {
struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- int glx; /* Number of levels for GCR3 table */
int nid; /* Node ID */
- u64 *gcr3_tbl; /* Guest CR3 table */
- unsigned long flags; /* flags to find out type of domain */
+ enum protection_domain_mode pd_mode; /* Track page table type */
bool dirty_tracking; /* dirty tracking is enabled in the domain */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
@@ -816,6 +809,7 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
+ struct gcr3_tbl_info gcr3_info; /* Per-device GCR3 table */
struct device *dev;
u16 devid; /* PCI Device ID */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 40979b0f52..e740dc54c4 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -30,6 +30,7 @@
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>
+#include <asm/sev.h>
#include <linux/crash_dump.h>
@@ -1677,8 +1678,17 @@ static void __init free_pci_segments(void)
}
}
+static void __init free_sysfs(struct amd_iommu *iommu)
+{
+ if (iommu->iommu.dev) {
+ iommu_device_unregister(&iommu->iommu);
+ iommu_device_sysfs_remove(&iommu->iommu);
+ }
+}
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
+ free_sysfs(iommu);
free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
@@ -2096,6 +2106,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_max_glx_val = glxval;
else
amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+
+ iommu_enable_gt(iommu);
}
if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
@@ -2841,10 +2853,8 @@ static void enable_iommus_v2(void)
{
struct amd_iommu *iommu;
- for_each_iommu(iommu) {
+ for_each_iommu(iommu)
iommu_enable_ppr_log(iommu);
- iommu_enable_gt(iommu);
- }
}
static void enable_iommus_vapic(void)
@@ -3224,6 +3234,39 @@ out:
return true;
}
+static void iommu_snp_enable(void)
+{
+#ifdef CONFIG_KVM_AMD_SEV
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ return;
+ /*
+	 * SNP support requires that the IOMMU is enabled and configured
+	 * with a V1 page table (DTE[Mode] = 0 is not supported).
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
+ amd_iommu_snp_en = check_feature(FEATURE_SNP);
+ if (!amd_iommu_snp_en) {
+ pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
+ pr_info("IOMMU SNP support enabled.\n");
+ return;
+
+disable_snp:
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+#endif
+}
+
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
@@ -3259,6 +3302,7 @@ static int __init state_next(void)
break;
case IOMMU_ENABLED:
register_syscore_ops(&amd_iommu_syscore_ops);
+ iommu_snp_enable();
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
break;
@@ -3697,13 +3741,11 @@ u8 amd_iommu_pc_get_max_banks(unsigned int idx)
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
return amd_iommu_pc_present;
}
-EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
@@ -3714,7 +3756,6 @@ u8 amd_iommu_pc_get_max_counters(unsigned int idx)
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value, bool is_write)
@@ -3770,40 +3811,85 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
-#ifdef CONFIG_AMD_MEM_ENCRYPT
-int amd_iommu_snp_enable(void)
+#ifdef CONFIG_KVM_AMD_SEV
+static int iommu_page_make_shared(void *page)
{
- /*
- * The SNP support requires that IOMMU must be enabled, and is
- * not configured in the passthrough mode.
- */
- if (no_iommu || iommu_default_passthrough()) {
- pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
- return -EINVAL;
+ unsigned long paddr, pfn;
+
+ paddr = iommu_virt_to_phys(page);
+	/* The C-bit may be set in the paddr */
+ pfn = __sme_clr(paddr) >> PAGE_SHIFT;
+
+ if (!(pfn % PTRS_PER_PMD)) {
+ int ret, level;
+ bool assigned;
+
+ ret = snp_lookup_rmpentry(pfn, &assigned, &level);
+ if (ret) {
+ pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
+ return ret;
+ }
+
+ if (!assigned) {
+ pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
+ return -EINVAL;
+ }
+
+ if (level > PG_LEVEL_4K) {
+ ret = psmash(pfn);
+ if (!ret)
+ goto done;
+
+ pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
+ pfn, ret, level);
+ return ret;
+ }
}
- /*
- * Prevent enabling SNP after IOMMU_ENABLED state because this process
- * affect how IOMMU driver sets up data structures and configures
- * IOMMU hardware.
- */
- if (init_state > IOMMU_ENABLED) {
- pr_err("SNP: Too late to enable SNP for IOMMU.\n");
- return -EINVAL;
+done:
+ return rmp_make_shared(pfn, PG_LEVEL_4K);
+}
+
+static int iommu_make_shared(void *va, size_t size)
+{
+ void *page;
+ int ret;
+
+ if (!va)
+ return 0;
+
+ for (page = va; page < (va + size); page += PAGE_SIZE) {
+ ret = iommu_page_make_shared(page);
+ if (ret)
+ return ret;
}
- amd_iommu_snp_en = check_feature(FEATURE_SNP);
+ return 0;
+}
+
+int amd_iommu_snp_disable(void)
+{
+ struct amd_iommu *iommu;
+ int ret;
+
if (!amd_iommu_snp_en)
- return -EINVAL;
+ return 0;
- pr_info("SNP enabled\n");
+ for_each_iommu(iommu) {
+ ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
+ if (ret)
+ return ret;
- /* Enforce IOMMU v1 pagetable when SNP is enabled. */
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
- pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
+ if (ret)
+ return ret;
+
+ ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
+ if (ret)
+ return ret;
}
return 0;
}
+EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
#endif
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 6d69ba6074..93489d2db4 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -350,38 +350,26 @@ static const struct iommu_flush_ops v2_flush_ops = {
static void v2_free_pgtable(struct io_pgtable *iop)
{
- struct protection_domain *pdom;
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
- pdom = container_of(pgtable, struct protection_domain, iop);
- if (!(pdom->flags & PD_IOMMUV2_MASK))
+ if (!pgtable || !pgtable->pgd)
return;
- /* Clear gcr3 entry */
- amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(pdom);
-
/* Free page table */
free_pgtable(pgtable->pgd, get_pgtable_level());
+ pgtable->pgd = NULL;
}
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
struct protection_domain *pdom = (struct protection_domain *)cookie;
- int ret;
int ias = IOMMU_IN_ADDR_BIT_SIZE;
pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
if (!pgtable->pgd)
return NULL;
- ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
- if (ret)
- goto err_free_pgd;
-
if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
ias = 57;
@@ -395,11 +383,6 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->tlb = &v2_flush_ops;
return &pgtable->iop;
-
-err_free_pgd:
- free_pgtable_page(pgtable->pgd);
-
- return NULL;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f945bf3253..fb727f5b0b 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -45,10 +45,6 @@
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-
/* Reserved IOVA ranges */
#define MSI_RANGE_START (0xfee00000)
#define MSI_RANGE_END (0xfeefffff)
@@ -79,6 +75,9 @@ struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data);
+
/****************************************************************************
*
* Helper functions
@@ -87,7 +86,7 @@ static void detach_device(struct device *dev);
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
- return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+ return (pdom && (pdom->pd_mode == PD_MODE_V2));
}
static inline int get_acpihid_device_id(struct device *dev,
@@ -1388,14 +1387,9 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
size_t size, ioasid_t pasid, bool gn)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct iommu_cmd cmd;
- int qdep;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
+ int qdep = dev_data->ats_qdep;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
size, pasid, gn);
@@ -1415,16 +1409,12 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
*/
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct pci_dev *pdev = NULL;
struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
-
if (dev_is_pci(dev_data->dev))
pdev = to_pci_dev(dev_data->dev);
@@ -1453,27 +1443,37 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
-/*
- * TLB invalidation function which is called from the mapping functions.
- * It invalidates a single PTE if the range to flush is within a single
- * page. Otherwise it flushes the whole TLB of the IOMMU.
- */
-static void __domain_flush_pages(struct protection_domain *domain,
+static int domain_flush_pages_v2(struct protection_domain *pdom,
u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
- int ret = 0, i;
- ioasid_t pasid = IOMMU_NO_PASID;
- bool gn = false;
+ int ret = 0;
- if (pdom_is_v2_pgtbl_mode(domain))
- gn = true;
+ list_for_each_entry(dev_data, &pdom->dev_list, list) {
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+ u16 domid = dev_data->gcr3_info.domid;
+
+ build_inv_iommu_pages(&cmd, address, size,
+ domid, IOMMU_NO_PASID, true);
+
+ ret |= iommu_queue_command(iommu, &cmd);
+ }
+
+ return ret;
+}
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
+static int domain_flush_pages_v1(struct protection_domain *pdom,
+ u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ int ret = 0, i;
+
+ build_inv_iommu_pages(&cmd, address, size,
+ pdom->id, IOMMU_NO_PASID, false);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (!domain->dev_iommu[i])
+ if (!pdom->dev_iommu[i])
continue;
/*
@@ -1483,6 +1483,28 @@ static void __domain_flush_pages(struct protection_domain *domain,
ret |= iommu_queue_command(amd_iommus[i], &cmd);
}
+ return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It flushes a range of PTEs of the domain.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
+{
+ struct iommu_dev_data *dev_data;
+ int ret = 0;
+ ioasid_t pasid = IOMMU_NO_PASID;
+ bool gn = false;
+
+ if (pdom_is_v2_pgtbl_mode(domain)) {
+ gn = true;
+ ret = domain_flush_pages_v2(domain, address, size);
+ } else {
+ ret = domain_flush_pages_v1(domain, address, size);
+ }
+
list_for_each_entry(dev_data, &domain->dev_list, list) {
if (!dev_data->ats_enabled)
@@ -1551,6 +1573,29 @@ static void amd_iommu_domain_flush_all(struct protection_domain *domain)
CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+
+ build_inv_iommu_pages(&cmd, address, size,
+ dev_data->gcr3_info.domid, pasid, true);
+ iommu_queue_command(iommu, &cmd);
+
+ if (dev_data->ats_enabled)
+ device_flush_iotlb(dev_data, address, size, pasid, true);
+
+ iommu_completion_wait(iommu);
+}
+
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid)
+{
+	amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
+					CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
+}
+
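A hedged sketch of a caller for the two flush helpers above; the wrapper below is hypothetical, and only the amd_iommu_dev_flush_pasid_*() functions are from this patch:

	/* Illustrative only: ranged flush when the span is known, full flush otherwise. */
	static void example_flush_pasid(struct iommu_dev_data *dev_data,
					ioasid_t pasid, u64 addr, size_t size)
	{
		if (size)
			amd_iommu_dev_flush_pasid_pages(dev_data, pasid, addr, size);
		else
			amd_iommu_dev_flush_pasid_all(dev_data, pasid);
	}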
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{
int i;
@@ -1592,6 +1637,49 @@ static void domain_flush_devices(struct protection_domain *domain)
device_flush_dte(dev_data);
}
+static void update_device_table(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ set_dte_entry(iommu, dev_data);
+ clone_aliases(iommu, dev_data->dev);
+ }
+}
+
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+{
+ update_device_table(domain);
+ domain_flush_devices(domain);
+}
+
+void amd_iommu_domain_update(struct protection_domain *domain)
+{
+ /* Update device table */
+ amd_iommu_update_and_flush_device_table(domain);
+
+ /* Flush domain TLB(s) and wait for completion */
+ amd_iommu_domain_flush_all(domain);
+}
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ iommu = get_amd_iommu_from_dev(&pdev->dev);
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
/****************************************************************************
*
* The next functions belong to the domain allocation. A domain is
@@ -1604,26 +1692,29 @@ static void domain_flush_devices(struct protection_domain *domain)
static u16 domain_id_alloc(void)
{
+ unsigned long flags;
int id;
- spin_lock(&pd_bitmap_lock);
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
return id;
}
static void domain_id_free(int id)
{
- spin_lock(&pd_bitmap_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
}
static void free_gcr3_tbl_level1(u64 *tbl)
@@ -1656,16 +1747,22 @@ static void free_gcr3_tbl_level2(u64 *tbl)
}
}
-static void free_gcr3_table(struct protection_domain *domain)
+static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
{
- if (domain->glx == 2)
- free_gcr3_tbl_level2(domain->gcr3_tbl);
- else if (domain->glx == 1)
- free_gcr3_tbl_level1(domain->gcr3_tbl);
+ if (gcr3_info->glx == 2)
+ free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
+ else if (gcr3_info->glx == 1)
+ free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
else
- BUG_ON(domain->glx != 0);
+ WARN_ON_ONCE(gcr3_info->glx != 0);
+
+ gcr3_info->glx = 0;
- free_page((unsigned long)domain->gcr3_tbl);
+ /* Free per device domain ID */
+ domain_id_free(gcr3_info->domid);
+
+ free_page((unsigned long)gcr3_info->gcr3_tbl);
+ gcr3_info->gcr3_tbl = NULL;
}
/*
@@ -1684,33 +1781,133 @@ static int get_gcr3_levels(int pasids)
return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
}
-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
+ struct amd_iommu *iommu, int pasids)
{
int levels = get_gcr3_levels(pasids);
+ int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
if (levels > amd_iommu_max_glx_val)
return -EINVAL;
- domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
+ if (gcr3_info->gcr3_tbl)
+ return -EBUSY;
+
+ /* Allocate per device domain ID */
+ gcr3_info->domid = domain_id_alloc();
+
+ gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
+ if (gcr3_info->gcr3_tbl == NULL) {
+ domain_id_free(gcr3_info->domid);
return -ENOMEM;
+ }
+
+ gcr3_info->glx = levels;
+
+ return 0;
+}
+
+static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
+ ioasid_t pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+ u64 *root = gcr3_info->gcr3_tbl;
+ int level = gcr3_info->glx;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = iommu_virt_to_phys(root) | GCR3_VALID;
+ }
+
+ root = iommu_phys_to_virt(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
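The GCR3 table is a small radix tree indexed by 9-bit slices of the PASID, one slice per level (glx down to 0). A standalone sketch of the index arithmetic __get_gcr3_pte() walks with, using an arbitrary example PASID (not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pasid = 0x12345;	/* example PASID */
		int glx = 2;			/* three-level GCR3 table */

		for (int level = glx; level >= 0; level--)
			printf("level %d index: 0x%03x\n",
			       level, (pasid >> (9 * level)) & 0x1ff);
		return 0;	/* prints 0x000, 0x091, 0x145 for this PASID */
	}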
+static int update_gcr3(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, unsigned long gcr3, bool set)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ u64 *pte;
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
+ pte = __get_gcr3_pte(gcr3_info, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
- amd_iommu_domain_update(domain);
+ if (set)
+ *pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
+ else
+ *pte = 0;
+ amd_iommu_dev_flush_pasid_all(dev_data, pasid);
return 0;
}
-static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
- struct protection_domain *domain, bool ats, bool ppr)
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
+ unsigned long gcr3)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, gcr3, true);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt++;
+ return ret;
+}
+
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, 0, false);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt--;
+ return ret;
+}
+
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ u16 devid = dev_data->devid;
+ u16 domid;
+ struct protection_domain *domain = dev_data->domain;
struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+
+ if (gcr3_info && gcr3_info->gcr3_tbl)
+ domid = dev_data->gcr3_info.domid;
+ else
+ domid = domain->id;
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
@@ -1724,23 +1921,23 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
* When SNP is enabled, Only set TV bit when IOMMU
* page translation is in use.
*/
- if (!amd_iommu_snp_en || (domain->id != 0))
+ if (!amd_iommu_snp_en || (domid != 0))
pte_root |= DTE_FLAG_TV;
flags = dev_table[devid].data[1];
- if (ats)
+ if (dev_data->ats_enabled)
flags |= DTE_FLAG_IOTLB;
- if (ppr)
+ if (dev_data->ppr)
pte_root |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
pte_root |= DTE_FLAG_HAD;
- if (domain->flags & PD_IOMMUV2_MASK) {
- u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
- u64 glx = domain->glx;
+ if (gcr3_info && gcr3_info->gcr3_tbl) {
+ u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+ u64 glx = gcr3_info->glx;
u64 tmp;
pte_root |= DTE_FLAG_GV;
@@ -1768,12 +1965,13 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
}
- if (domain->flags & PD_GIOV_MASK)
+ /* GIOV is supported with V2 page table mode only */
+ if (pdom_is_v2_pgtbl_mode(domain))
pte_root |= DTE_FLAG_GIOV;
}
flags &= ~DEV_DOMID_MASK;
- flags |= domain->id;
+ flags |= domid;
old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
dev_table[devid].data[1] = flags;
@@ -1804,16 +2002,11 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
amd_iommu_apply_erratum_63(iommu, devid);
}
-static void do_attach(struct iommu_dev_data *dev_data,
- struct protection_domain *domain)
+static int do_attach(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
{
- struct amd_iommu *iommu;
- bool ats;
-
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
- ats = dev_data->ats_enabled;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ int ret = 0;
/* Update data structures */
dev_data->domain = domain;
@@ -1827,22 +2020,40 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
+ /* Init GCR3 table and update device table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ /* By default, setup GCR3 table to support single PASID */
+ ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
+ if (ret)
+ return ret;
+
+ ret = update_gcr3(dev_data, 0,
+ iommu_virt_to_phys(domain->iop.pgd), true);
+ if (ret) {
+ free_gcr3_table(&dev_data->gcr3_info);
+ return ret;
+ }
+ }
+
/* Update device table */
- set_dte_entry(iommu, dev_data->devid, domain,
- ats, dev_data->ppr);
+ set_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
+
+ return ret;
}
static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
+ /* Clear GCR3 table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ update_gcr3(dev_data, 0, 0, false);
+ free_gcr3_table(&dev_data->gcr3_info);
+ }
/* Update data structures */
dev_data->domain = NULL;
@@ -1886,7 +2097,7 @@ static int attach_device(struct device *dev,
if (dev_is_pci(dev))
pdev_enable_caps(to_pci_dev(dev));
- do_attach(dev_data, domain);
+ ret = do_attach(dev_data, domain);
out:
spin_unlock(&dev_data->lock);
@@ -1954,8 +2165,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
ret = iommu_init_device(iommu, dev);
if (ret) {
- if (ret != -ENOTSUPP)
- dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+ dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
iommu_ignore_device(iommu, dev);
} else {
@@ -2000,42 +2210,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
/*****************************************************************************
*
- * The next functions belong to the dma_ops mapping/unmapping code.
- *
- *****************************************************************************/
-
-static void update_device_table(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
-
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
-
- if (!iommu)
- continue;
- set_dte_entry(iommu, dev_data->devid, domain,
- dev_data->ats_enabled, dev_data->ppr);
- clone_aliases(iommu, dev_data->dev);
- }
-}
-
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
-{
- update_device_table(domain);
- domain_flush_devices(domain);
-}
-
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
- /* Update device table */
- amd_iommu_update_and_flush_device_table(domain);
-
- /* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_all(domain);
-}
-
-/*****************************************************************************
- *
* The following functions belong to the exported interface of AMD IOMMU
*
* This interface allows access to lower level functions of the IOMMU
@@ -2070,9 +2244,6 @@ static void protection_domain_free(struct protection_domain *domain)
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
-
if (domain->iop.root)
free_page((unsigned long)domain->iop.root);
@@ -2094,19 +2265,16 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return -ENOMEM;
}
+ domain->pd_mode = PD_MODE_V1;
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
return 0;
}
-static int protection_domain_init_v2(struct protection_domain *domain)
+static int protection_domain_init_v2(struct protection_domain *pdom)
{
- domain->flags |= PD_GIOV_MASK;
-
- domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
- if (setup_gcr3_table(domain, 1))
- return -ENOMEM;
+ pdom->pd_mode = PD_MODE_V2;
+ pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
return 0;
}
@@ -2194,11 +2362,8 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
struct protection_domain *domain;
struct amd_iommu *iommu = NULL;
- if (dev) {
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return ERR_PTR(-ENODEV);
- }
+ if (dev)
+ iommu = get_amd_iommu_from_dev(dev);
/*
* Since DTE[Mode]=0 is prohibited on SNP-enabled system,
@@ -2279,7 +2444,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
int ret;
/*
@@ -2337,7 +2502,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
int prot = 0;
int ret = -EINVAL;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return -EINVAL;
@@ -2383,7 +2548,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
size_t r;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
@@ -2418,7 +2583,7 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
case IOMMU_CAP_DEFERRED_FLUSH:
return true;
case IOMMU_CAP_DIRTY_TRACKING: {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
return amd_iommu_hd_support(iommu);
}
@@ -2447,9 +2612,7 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
+ iommu = get_amd_iommu_from_dev_data(dev_data);
dev_table = get_dev_table(iommu);
pte_root = dev_table[dev_data->devid].data[0];
@@ -2509,9 +2672,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
devid = PCI_SBDF_TO_DEVID(sbdf);
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return;
+ iommu = get_amd_iommu_from_dev(dev);
pci_seg = iommu->pci_seg;
list_for_each_entry(entry, &pci_seg->unity_map, list) {
@@ -2649,216 +2810,6 @@ const struct iommu_ops amd_iommu_ops = {
}
};
-static int __flush_pasid(struct protection_domain *domain, u32 pasid,
- u64 address, size_t size)
-{
- struct iommu_dev_data *dev_data;
- struct iommu_cmd cmd;
- int i, ret;
-
- if (!(domain->flags & PD_IOMMUV2_MASK))
- return -EINVAL;
-
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
-
- /*
- * IOMMU TLB needs to be flushed before Device TLB to
- * prevent device TLB refill from IOMMU TLB
- */
- for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain->dev_iommu[i] == 0)
- continue;
-
- ret = iommu_queue_command(amd_iommus[i], &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until IOMMU TLB flushes are complete */
- amd_iommu_domain_flush_complete(domain);
-
- /* Now flush device TLBs */
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu;
- int qdep;
-
- /*
- There might be non-IOMMUv2 capable devices in an IOMMUv2
- * domain.
- */
- if (!dev_data->ats_enabled)
- continue;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
- build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
- address, size, pasid, true);
-
- ret = iommu_queue_command(iommu, &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until all device TLBs are flushed */
- amd_iommu_domain_flush_complete(domain);
-
- ret = 0;
-
-out:
-
- return ret;
-}
-
-static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
- u64 address)
-{
- return __flush_pasid(domain, pasid, address, PAGE_SIZE);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
- u64 address)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_page(domain, pasid, address);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
-{
- return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_tlb(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
-{
- int index;
- u64 *pte;
-
- while (true) {
-
- index = (pasid >> (9 * level)) & 0x1ff;
- pte = &root[index];
-
- if (level == 0)
- break;
-
- if (!(*pte & GCR3_VALID)) {
- if (!alloc)
- return NULL;
-
- root = (void *)get_zeroed_page(GFP_ATOMIC);
- if (root == NULL)
- return NULL;
-
- *pte = iommu_virt_to_phys(root) | GCR3_VALID;
- }
-
- root = iommu_phys_to_virt(*pte & PAGE_MASK);
-
- level -= 1;
- }
-
- return pte;
-}
-
-static int __set_gcr3(struct protection_domain *domain, u32 pasid,
- unsigned long cr3)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
- if (pte == NULL)
- return -ENOMEM;
-
- *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
- if (pte == NULL)
- return 0;
-
- *pte = 0;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
- unsigned long cr3)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __set_gcr3(domain, pasid, cr3);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __clear_gcr3(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
- int status, int tag)
-{
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- struct iommu_cmd cmd;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = rlookup_amd_iommu(&pdev->dev);
- if (!iommu)
- return -ENODEV;
-
- build_complete_ppr(&cmd, dev_data->devid, pasid, status,
- tag, dev_data->pri_tlp);
-
- return iommu_queue_command(iommu, &cmd);
-}
-
#ifdef CONFIG_IRQ_REMAP
/*****************************************************************************
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index ef3ee95706..eb1e62cd49 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -779,7 +779,8 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
kfree(dart_domain);
}
-static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int apple_dart_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 4a27fbdb2d..2cd433a9c8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include "arm-smmu-v3.h"
-#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"
struct arm_smmu_mmu_notifier {
@@ -364,7 +363,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_domain *smmu_domain;
+
+ if (!(domain->type & __IOMMU_DOMAIN_PAGING))
+ return -ENODEV;
+ smmu_domain = to_smmu_domain(domain);
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+ return -ENODEV;
if (!master || !master->sva_enabled)
return -ENODEV;
@@ -470,7 +475,6 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
- int ret;
struct device *dev = master->dev;
/*
@@ -483,16 +487,7 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return -EINVAL;
- ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
- if (ret)
- return ret;
-
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret) {
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- return ret;
- }
- return 0;
+ return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}
static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
@@ -502,7 +497,6 @@ static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return;
- iommu_unregister_device_fault_handler(dev);
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f3f2e47b6d..3afec8714c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -29,7 +29,6 @@
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
-#include "../../iommu-sva.h"
static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
@@ -48,6 +47,9 @@ enum arm_smmu_msi_index {
ARM_SMMU_MAX_MSIS,
};
+static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
+ ioasid_t sid);
+
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
[EVTQ_MSI_INDEX] = {
ARM_SMMU_EVTQ_IRQ_CFG0,
@@ -86,6 +88,9 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu);
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -921,31 +926,29 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
}
-static int arm_smmu_page_response(struct device *dev,
- struct iommu_fault_event *unused,
- struct iommu_page_response *resp)
+static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
+ struct iommu_page_response *resp)
{
struct arm_smmu_cmdq_ent cmd = {0};
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
int sid = master->streams[0].id;
- if (master->stall_enabled) {
- cmd.opcode = CMDQ_OP_RESUME;
- cmd.resume.sid = sid;
- cmd.resume.stag = resp->grpid;
- switch (resp->code) {
- case IOMMU_PAGE_RESP_INVALID:
- case IOMMU_PAGE_RESP_FAILURE:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
- break;
- case IOMMU_PAGE_RESP_SUCCESS:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
- break;
- default:
- return -EINVAL;
- }
- } else {
- return -ENODEV;
+ if (WARN_ON(!master->stall_enabled))
+ return;
+
+ cmd.opcode = CMDQ_OP_RESUME;
+ cmd.resume.sid = sid;
+ cmd.resume.stag = resp->grpid;
+ switch (resp->code) {
+ case IOMMU_PAGE_RESP_INVALID:
+ case IOMMU_PAGE_RESP_FAILURE:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
+ break;
+ case IOMMU_PAGE_RESP_SUCCESS:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
+ break;
+ default:
+ break;
}
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
@@ -955,8 +958,6 @@ static int arm_smmu_page_response(struct device *dev,
* terminated... at some point in the future. PRI_RESP is fire and
* forget.
*/
-
- return 0;
}
/* Context descriptor manipulation functions */
@@ -971,6 +972,200 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
+/*
+ * Based on the value of ent report which bits of the STE the HW will access. It
+ * would be nice if this was complete according to the spec, but minimally it
+ * has to capture the bits this driver uses.
+ */
+static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
+ struct arm_smmu_ste *used_bits)
+{
+ unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
+
+ used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
+ if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
+ return;
+
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
+
+ /* S1 translates */
+ if (cfg & BIT(0)) {
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
+ STRTAB_STE_0_S1CTXPTR_MASK |
+ STRTAB_STE_0_S1CDMAX);
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
+ STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
+ STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
+ STRTAB_STE_1_EATS);
+ used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
+ }
+
+ /* S2 translates */
+ if (cfg & BIT(1)) {
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
+ used_bits->data[2] |=
+ cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
+ STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
+ used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
+ }
+
+ if (cfg == STRTAB_STE_0_CFG_BYPASS)
+ used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
+}
+
+/*
+ * Figure out if we can do a hitless update of entry to become target. Returns a
+ * bit mask where 1 indicates that qword needs to be set disruptively.
+ * unused_update is an intermediate value of entry that has unused bits set to
+ * their new values.
+ */
+static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target,
+ struct arm_smmu_ste *unused_update)
+{
+ struct arm_smmu_ste target_used = {};
+ struct arm_smmu_ste cur_used = {};
+ u8 used_qword_diff = 0;
+ unsigned int i;
+
+ arm_smmu_get_ste_used(entry, &cur_used);
+ arm_smmu_get_ste_used(target, &target_used);
+
+ for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
+ /*
+ * Check that masks are up to date, the make functions are not
+ * allowed to set a bit to 1 if the used function doesn't say it
+ * is used.
+ */
+ WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
+
+ /* Bits can change because they are not currently being used */
+ unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
+ (target->data[i] & ~cur_used.data[i]);
+ /*
+ * Each bit indicates that a used bit in a qword needs to be
+ * changed after unused_update is applied.
+ */
+ if ((unused_update->data[i] & target_used.data[i]) !=
+ target->data[i])
+ used_qword_diff |= 1 << i;
+ }
+ return used_qword_diff;
+}
+
+static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target, unsigned int start,
+ unsigned int len)
+{
+ bool changed = false;
+ unsigned int i;
+
+ for (i = start; len != 0; len--, i++) {
+ if (entry->data[i] != target->data[i]) {
+ WRITE_ONCE(entry->data[i], target->data[i]);
+ changed = true;
+ }
+ }
+
+ if (changed)
+ arm_smmu_sync_ste_for_sid(smmu, sid);
+ return changed;
+}
+
+/*
+ * Update the STE/CD to the target configuration. The transition from the
+ * current entry to the target entry takes place over multiple steps that
+ * attempts to make the transition hitless if possible. This function takes care
+ * not to create a situation where the HW can perceive a corrupted entry. HW is
+ * only required to have a 64 bit atomicity with stores from the CPU, while
+ * entries are many 64 bit values big.
+ *
+ * The difference between the current value and the target value is analyzed to
+ * determine which of three updates are required - disruptive, hitless or no
+ * change.
+ *
+ * In the most general disruptive case we can make any update in three steps:
+ * - Disrupting the entry (V=0)
+ * - Fill now unused qwords, except qword 0 which contains V
+ * - Make qword 0 have the final value and valid (V=1) with a single 64
+ * bit store
+ *
+ * However this disrupts the HW while it is happening. There are several
+ * interesting cases where a STE/CD can be updated without disturbing the HW
+ * because only a small number of bits are changing (S1DSS, CONFIG, etc) or
+ * because the used bits don't intersect. We can detect this by calculating how
+ * many 64 bit values need update after adjusting the unused bits and skip the
+ * V=0 process. This relies on the IGNORED behavior described in the
+ * specification.
+ */
+static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target)
+{
+ unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ste unused_update;
+ u8 used_qword_diff;
+
+ used_qword_diff =
+ arm_smmu_entry_qword_diff(entry, target, &unused_update);
+ if (hweight8(used_qword_diff) == 1) {
+ /*
+ * Only one qword needs its used bits to be changed. This is a
+ * hitless update, update all bits the current STE is ignoring
+ * to their new values, then update a single "critical qword" to
+ * change the STE and finally 0 out any bits that are now unused
+ * in the target configuration.
+ */
+ unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
+
+ /*
+ * Skip writing unused bits in the critical qword since we'll be
+ * writing it in the next step anyways. This can save a sync
+ * when the only change is in that qword.
+ */
+ unused_update.data[critical_qword_index] =
+ entry->data[critical_qword_index];
+ entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
+ entry_set(smmu, sid, entry, target, critical_qword_index, 1);
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
+ } else if (used_qword_diff) {
+ /*
+ * At least two qwords need their inuse bits to be changed. This
+ * requires a breaking update, zero the V bit, write all qwords
+ * but 0, then set qword 0
+ */
+ unused_update.data[0] = entry->data[0] &
+ cpu_to_le64(~STRTAB_STE_0_V);
+ entry_set(smmu, sid, entry, &unused_update, 0, 1);
+ entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
+ entry_set(smmu, sid, entry, target, 0, 1);
+ } else {
+ /*
+ * No inuse bit changed. Sanity check that all unused bits are 0
+ * in the entry. The target was already sanity checked by
+ * arm_smmu_entry_qword_diff().
+ */
+ WARN_ON_ONCE(
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
+ }
+
+ /* It's likely that we'll want to use the new STE soon */
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
+ struct arm_smmu_cmdq_ent
+ prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
+ .prefetch = {
+ .sid = sid,
+ } };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ }
+}
+
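A toy, self-contained model (not the driver code) of the qword-diff computation that drives the three cases above: cur_used[] and tgt_used[] stand in for arm_smmu_get_ste_used() output, and the returned mask decides between no-op, hitless, and breaking (V=0) updates:

	#include <stdint.h>

	static uint8_t qword_diff(const uint64_t *cur, const uint64_t *cur_used,
				  const uint64_t *tgt, const uint64_t *tgt_used,
				  int n)
	{
		uint8_t diff = 0;

		for (int i = 0; i < n; i++) {
			/* bits the HW currently ignores may be flipped freely */
			uint64_t merged = (cur[i] & cur_used[i]) |
					  (tgt[i] & ~cur_used[i]);
			/* does a bit the target *uses* still need to change? */
			if ((merged & tgt_used[i]) != tgt[i])
				diff |= 1 << i;
		}
		return diff;	/* 0: no-op, one bit set: hitless, else: break via V=0 */
	}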
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
{
@@ -1251,158 +1446,138 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
-static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
- struct arm_smmu_ste *dst)
+static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
- /*
- * This is hideously complicated, but we only really care about
- * three cases at the moment:
- *
- * 1. Invalid (all zero) -> bypass/fault (init)
- * 2. Bypass/fault -> translation/bypass (attach)
- * 3. Translation/bypass -> bypass/fault (detach)
- *
- * Given that we can't update the STE atomically and the SMMU
- * doesn't read the thing in a defined order, that leaves us
- * with the following maintenance requirements:
- *
- * 1. Update Config, return (init time STEs aren't live)
- * 2. Write everything apart from dword 0, sync, write dword 0, sync
- * 3. Update Config, sync
- */
- u64 val = le64_to_cpu(dst->data[0]);
- bool ste_live = false;
- struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
- struct arm_smmu_s2_cfg *s2_cfg = NULL;
- struct arm_smmu_domain *smmu_domain = master->domain;
- struct arm_smmu_cmdq_ent prefetch_cmd = {
- .opcode = CMDQ_OP_PREFETCH_CFG,
- .prefetch = {
- .sid = sid,
- },
- };
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
+}
- if (smmu_domain) {
- switch (smmu_domain->stage) {
- case ARM_SMMU_DOMAIN_S1:
- cd_table = &master->cd_table;
- break;
- case ARM_SMMU_DOMAIN_S2:
- s2_cfg = &smmu_domain->s2_cfg;
- break;
- default:
- break;
- }
- }
+static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target)
+{
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
- if (val & STRTAB_STE_0_V) {
- switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
- case STRTAB_STE_0_CFG_BYPASS:
- break;
- case STRTAB_STE_0_CFG_S1_TRANS:
- case STRTAB_STE_0_CFG_S2_TRANS:
- ste_live = true;
- break;
- case STRTAB_STE_0_CFG_ABORT:
- BUG_ON(!disable_bypass);
- break;
- default:
- BUG(); /* STE corruption */
- }
- }
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+}
- /* Nuke the existing STE_0 value, as we're going to rewrite it */
- val = STRTAB_STE_0_V;
+static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master)
+{
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
- /* Bypass/fault */
- if (!smmu_domain || !(cd_table || s2_cfg)) {
- if (!smmu_domain && disable_bypass)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
+ (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+ FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
+ ((smmu->features & ARM_SMMU_FEAT_STALLS &&
+ !master->stall_enabled) ?
+ STRTAB_STE_1_S1STALLD :
+ 0) |
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_E2H) {
+ /*
+ * To support BTM the streamworld needs to match the
+ * configuration of the CPU so that the ASID broadcasts are
+ * properly matched. This means either S/NS-EL2-E2H (hypervisor)
+ * or NS-EL1 (guest). Since an SVA domain can be installed in a
+ * PASID this should always use a BTM compatible configuration
+ * if the HW supports it.
+ */
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
+ } else {
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
- dst->data[0] = cpu_to_le64(val);
- dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
- dst->data[2] = 0; /* Nuke the VMID */
/*
- * The SMMU can perform negative caching, so we must sync
- * the STE regardless of whether the old value was live.
+ * VMID 0 is reserved for stage-2 bypass EL1 STEs, see
+	 * arm_smmu_domain_finalise_s2()
*/
- if (smmu)
- arm_smmu_sync_ste_for_sid(smmu, sid);
- return;
+ target->data[2] =
+ cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
+}
- if (cd_table) {
- u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
- STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
-
- BUG_ON(ste_live);
- dst->data[1] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
- FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
- FIELD_PREP(STRTAB_STE_1_STRW, strw));
-
- if (smmu->features & ARM_SMMU_FEAT_STALLS &&
- !master->stall_enabled)
- dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
-
- val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
- FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
- FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
- FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
- }
+static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
+ const struct io_pgtable_cfg *pgtbl_cfg =
+ &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
+ &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ u64 vtcr_val;
+ struct arm_smmu_device *smmu = master->smmu;
- if (s2_cfg) {
- BUG_ON(ste_live);
- dst->data[2] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
- FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+
+ vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
+ target->data[2] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
+ STRTAB_STE_2_S2AA64 |
#ifdef __BIG_ENDIAN
- STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2ENDI |
#endif
- STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
- STRTAB_STE_2_S2R);
-
- dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
-
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
- }
-
- if (master->ats_enabled)
- dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
- STRTAB_STE_1_EATS_TRANS));
+ STRTAB_STE_2_S2PTW |
+ STRTAB_STE_2_S2R);
- arm_smmu_sync_ste_for_sid(smmu, sid);
- /* See comment in arm_smmu_write_ctx_desc() */
- WRITE_ONCE(dst->data[0], cpu_to_le64(val));
- arm_smmu_sync_ste_for_sid(smmu, sid);
-
- /* It's likely that we'll want to use the new STE soon */
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
+ STRTAB_STE_3_S2TTB_MASK);
}
-static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
- unsigned int nent, bool force)
+/*
+ * This can safely directly manipulate the STE memory without a sync sequence
+ * because the STE table has not been installed in the SMMU yet.
+ */
+static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *strtab,
+ unsigned int nent)
{
unsigned int i;
- u64 val = STRTAB_STE_0_V;
-
- if (disable_bypass && !force)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- strtab->data[0] = cpu_to_le64(val);
- strtab->data[1] = cpu_to_le64(FIELD_PREP(
- STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
- strtab->data[2] = 0;
+ if (disable_bypass)
+ arm_smmu_make_abort_ste(strtab);
+ else
+ arm_smmu_make_bypass_ste(smmu, strtab);
strtab++;
}
}
@@ -1430,7 +1605,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
+ arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -1460,27 +1635,19 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
/* IRQ and event handlers */
static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
{
- int ret;
- u32 reason;
+ int ret = 0;
u32 perm = 0;
struct arm_smmu_master *master;
bool ssid_valid = evt[0] & EVTQ_0_SSV;
u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
- struct iommu_fault_event fault_evt = { };
+ struct iopf_fault fault_evt = { };
struct iommu_fault *flt = &fault_evt.fault;
switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
case EVT_ID_TRANSLATION_FAULT:
- reason = IOMMU_FAULT_REASON_PTE_FETCH;
- break;
case EVT_ID_ADDR_SIZE_FAULT:
- reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
- break;
case EVT_ID_ACCESS_FAULT:
- reason = IOMMU_FAULT_REASON_ACCESS;
- break;
case EVT_ID_PERMISSION_FAULT:
- reason = IOMMU_FAULT_REASON_PERMISSION;
break;
default:
return -EOPNOTSUPP;
@@ -1490,6 +1657,9 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_S2)
return -EFAULT;
+ if (!(evt[1] & EVTQ_1_STALL))
+ return -EOPNOTSUPP;
+
if (evt[1] & EVTQ_1_RnW)
perm |= IOMMU_FAULT_PERM_READ;
else
@@ -1501,32 +1671,17 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_PnU)
perm |= IOMMU_FAULT_PERM_PRIV;
- if (evt[1] & EVTQ_1_STALL) {
- flt->type = IOMMU_FAULT_PAGE_REQ;
- flt->prm = (struct iommu_fault_page_request) {
- .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
-
- if (ssid_valid) {
- flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
- } else {
- flt->type = IOMMU_FAULT_DMA_UNRECOV;
- flt->event = (struct iommu_fault_unrecoverable) {
- .reason = reason,
- .flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
+ flt->type = IOMMU_FAULT_PAGE_REQ;
+ flt->prm = (struct iommu_fault_page_request) {
+ .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+ .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
- if (ssid_valid) {
- flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
- flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
+ if (ssid_valid) {
+ flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
}
mutex_lock(&smmu->streams_mutex);
@@ -1536,17 +1691,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
goto out_unlock;
}
- ret = iommu_report_device_fault(master->dev, &fault_evt);
- if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
- /* Nobody cared, abort the access */
- struct iommu_page_response resp = {
- .pasid = flt->prm.pasid,
- .grpid = flt->prm.grpid,
- .code = IOMMU_PAGE_RESP_FAILURE,
- };
- arm_smmu_page_response(master->dev, &fault_evt, &resp);
- }
-
+ iommu_report_device_fault(master->dev, &fault_evt);
out_unlock:
mutex_unlock(&smmu->streams_mutex);
return ret;
@@ -2025,15 +2170,15 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
- struct arm_smmu_domain *smmu_domain;
if (type == IOMMU_DOMAIN_SVA)
return arm_smmu_sva_domain_alloc();
+ return ERR_PTR(-EOPNOTSUPP);
+}
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+{
+ struct arm_smmu_domain *smmu_domain;
/*
* Allocate the domain and initialise some of its data structures.
@@ -2042,13 +2187,23 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
+ if (dev) {
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+ if (ret) {
+ kfree(smmu_domain);
+ return ERR_PTR(ret);
+ }
+ }
return &smmu_domain->domain;
}
@@ -2074,12 +2229,12 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
-static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
u32 asid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
@@ -2111,13 +2266,12 @@ out_unlock:
return ret;
}
-static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int vmid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
- typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
/* Reserve VMID 0 for stage-2 bypass STEs */
vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
@@ -2125,35 +2279,21 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
if (vmid < 0)
return vmid;
- vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
cfg->vmid = (u16)vmid;
- cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
return 0;
}
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu)
{
int ret;
unsigned long ias, oas;
enum io_pgtable_fmt fmt;
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
- int (*finalise_stage_fn)(struct arm_smmu_domain *,
- struct io_pgtable_cfg *);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
- return 0;
- }
+ int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg);
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2192,17 +2332,18 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (!pgtbl_ops)
return -ENOMEM;
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
- domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
- domain->geometry.force_aperture = true;
+ smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
+ smmu_domain->domain.geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
}
smmu_domain->pgtbl_ops = pgtbl_ops;
+ smmu_domain->smmu = smmu;
return 0;
}
@@ -2225,7 +2366,8 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
}
}
-static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
+ const struct arm_smmu_ste *target)
{
int i, j;
struct arm_smmu_device *smmu = master->smmu;
@@ -2242,7 +2384,7 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
if (j < i)
continue;
- arm_smmu_write_strtab_ent(master, sid, step);
+ arm_smmu_write_ste(master, sid, step, target);
}
}
@@ -2261,12 +2403,12 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}
-static void arm_smmu_enable_ats(struct arm_smmu_master *master)
+static void arm_smmu_enable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
size_t stu;
struct pci_dev *pdev;
struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_domain *smmu_domain = master->domain;
/* Don't enable ATS at the endpoint if it's not enabled in the STE */
if (!master->ats_enabled)
@@ -2282,10 +2424,9 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
-static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+static void arm_smmu_disable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = master->domain;
-
if (!master->ats_enabled)
return;
@@ -2348,35 +2489,28 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev);
+ struct arm_smmu_domain *smmu_domain;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = master->domain;
- if (!smmu_domain)
+ if (!domain || !(domain->type & __IOMMU_DOMAIN_PAGING))
return;
- arm_smmu_disable_ats(master);
+ smmu_domain = to_smmu_domain(domain);
+ arm_smmu_disable_ats(master, smmu_domain);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- master->domain = NULL;
master->ats_enabled = false;
- arm_smmu_install_ste_for_dev(master);
- /*
- * Clearing the CD entry isn't strictly required to detach the domain
- * since the table is uninstalled anyway, but it helps avoid confusion
- * in the call to arm_smmu_write_ctx_desc on the next attach (which
- * expects the entry to be empty).
- */
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
unsigned long flags;
+ struct arm_smmu_ste target;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2401,10 +2535,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) {
- smmu_domain->smmu = smmu;
- ret = arm_smmu_domain_finalise(domain);
- if (ret)
- smmu_domain->smmu = NULL;
+ ret = arm_smmu_domain_finalise(smmu_domain, smmu);
} else if (smmu_domain->smmu != smmu)
ret = -EINVAL;
@@ -2422,46 +2553,51 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_detach_dev(master);
- master->domain = smmu_domain;
-
- /*
- * The SMMU does not support enabling ATS with bypass. When the STE is
- * in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and
- * Translated transactions are denied as though ATS is disabled for the
- * stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
- * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
- */
- if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
- master->ats_enabled = arm_smmu_ats_supported(master);
+ master->ats_enabled = arm_smmu_ats_supported(master);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_add(&master->domain_head, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
if (!master->cd_table.cdtab) {
ret = arm_smmu_alloc_cd_tables(master);
- if (ret) {
- master->domain = NULL;
+ if (ret)
+ goto out_list_del;
+ } else {
+ /*
+ * arm_smmu_write_ctx_desc() relies on the entry being
+ * invalid to work, clear any existing entry.
+ */
+ ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ if (ret)
goto out_list_del;
- }
}
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
- if (ret) {
- master->domain = NULL;
+ if (ret)
goto out_list_del;
- }
- }
- arm_smmu_install_ste_for_dev(master);
+ arm_smmu_make_cdtable_ste(&target, master);
+ arm_smmu_install_ste_for_dev(master, &target);
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
+ arm_smmu_install_ste_for_dev(master, &target);
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ break;
+ }
- arm_smmu_enable_ats(master);
+ arm_smmu_enable_ats(master, smmu_domain);
goto out_unlock;
out_list_del:
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
out_unlock:
@@ -2469,6 +2605,79 @@ out_unlock:
return ret;
}
+static int arm_smmu_attach_dev_ste(struct device *dev,
+ struct arm_smmu_ste *ste)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
+
+ /*
+	 * Do not allow any ASID to be changed while we are working on the STE,
+ * otherwise we could miss invalidations.
+ */
+ mutex_lock(&arm_smmu_asid_lock);
+
+ /*
+ * The SMMU does not support enabling ATS with bypass/abort. When the
+ * STE is in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests
+ * and Translated transactions are denied as though ATS is disabled for
+ * the stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
+ * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ */
+ arm_smmu_detach_dev(master);
+
+ arm_smmu_install_ste_for_dev(master, ste);
+ mutex_unlock(&arm_smmu_asid_lock);
+
+ /*
+ * This has to be done after removing the master from the
+ * arm_smmu_domain->devices to avoid races updating the same context
+ * descriptor from arm_smmu_share_asid().
+ */
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
+ return 0;
+}
+
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ arm_smmu_make_bypass_ste(master->smmu, &ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+ .attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &arm_smmu_identity_ops,
+};
+
+static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_make_abort_ste(&ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_blocked_ops = {
+ .attach_dev = arm_smmu_attach_dev_blocked,
+};
+
+static struct iommu_domain arm_smmu_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &arm_smmu_blocked_ops,
+};
+
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -2662,6 +2871,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
INIT_LIST_HEAD(&master->bonds);
+ INIT_LIST_HEAD(&master->domain_head);
dev_iommu_priv_set(dev, master);
ret = arm_smmu_insert_master(smmu, master);
@@ -2703,7 +2913,13 @@ static void arm_smmu_release_device(struct device *dev)
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- arm_smmu_detach_dev(master);
+
+ /* Put the STE back to what arm_smmu_init_strtab() sets */
+ if (disable_bypass && !dev->iommu->require_direct)
+ arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
+ else
+ arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
+
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
if (master->cd_table.cdtab)
@@ -2743,7 +2959,8 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
}
@@ -2848,8 +3065,11 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
}
static struct iommu_ops arm_smmu_ops = {
+ .identity_domain = &arm_smmu_identity_domain,
+ .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
@@ -3053,7 +3273,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
+ arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents);
return 0;
}
@@ -3129,7 +3349,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
static void arm_smmu_free_msis(void *data)
{
struct device *dev = data;
- platform_msi_domain_free_irqs(dev);
+
+ platform_device_msi_free_irqs_all(dev);
}
static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -3170,7 +3391,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
}
/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
- ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
return;
@@ -3181,7 +3402,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
/* Add callback to free MSIs on teardown */
- devm_add_action(dev, arm_smmu_free_msis, dev);
+ devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
}
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
@@ -3565,6 +3786,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
+ if (reg & IDR1_ATTR_TYPES_OVR)
+ smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR;
+
/* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
@@ -3764,7 +3988,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
- struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;
@@ -3777,8 +4000,12 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
continue;
}
- step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
- arm_smmu_init_bypass_stes(step, 1, true);
+ /*
+ * STE table is not programmed to HW, see
+			 * arm_smmu_init_initial_stes()
+ */
+ arm_smmu_make_bypass_ste(smmu,
+ arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 65fb388d51..2a19bb63e5 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -44,6 +44,7 @@
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
+#define IDR1_ATTR_TYPES_OVR (1 << 27)
#define IDR1_CMDQS GENMASK(25, 21)
#define IDR1_EVTQS GENMASK(20, 16)
#define IDR1_PRIQS GENMASK(15, 11)
@@ -609,8 +610,6 @@ struct arm_smmu_ctx_desc_cfg {
struct arm_smmu_s2_cfg {
u16 vmid;
- u64 vttbr;
- u64 vtcr;
};
struct arm_smmu_strtab_cfg {
@@ -649,6 +648,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
+#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -697,7 +697,6 @@ struct arm_smmu_stream {
struct arm_smmu_master {
struct arm_smmu_device *smmu;
struct device *dev;
- struct arm_smmu_domain *domain;
struct list_head domain_head;
struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */
@@ -715,7 +714,6 @@ struct arm_smmu_master {
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 8b04ece004..5c7cfc51b5 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -260,6 +260,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sm6375-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
+ { .compatible = "qcom,x1e80100-mdss" },
{ }
};
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 6317aaf7b3..c572d877b0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1546,7 +1546,8 @@ static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
u32 mask, fwid = 0;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 17a1c163fe..e079bb7a99 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -546,7 +546,8 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int qcom_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
struct platform_device *iommu_pdev;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 639efa0c40..e4cb26f6a9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -859,6 +859,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
iommu_deferred_attach(dev, domain))
return DMA_MAPPING_ERROR;
+ /* If anyone ever wants this we'd need support in the IOVA allocator */
+ if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad),
+ "Unsupported alignment constraint\n"))
+ return DMA_MAPPING_ERROR;
+
size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 2c6e9094f1..d98c916194 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1431,7 +1431,7 @@ static void exynos_iommu_release_device(struct device *dev)
}
static int exynos_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 012cd2541a..f52fb39c96 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -51,6 +51,7 @@ config INTEL_IOMMU_SVM
depends on X86_64
select MMU_NOTIFIER
select IOMMU_SVA
+ select IOMMU_IOPF
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
@@ -64,17 +65,6 @@ config INTEL_IOMMU_DEFAULT_ON
one is found. If this option is not selected, DMAR support can
be enabled by passing intel_iommu=on to the kernel.
-config INTEL_IOMMU_BROKEN_GFX_WA
- bool "Workaround broken graphics drivers (going away soon)"
- depends on BROKEN && X86
- help
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA, at least until this
- option is removed in the 2.6.32 kernel.
-
config INTEL_IOMMU_FLOPPY_WA
def_bool y
depends on X86
@@ -97,8 +87,8 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
the default value.
config INTEL_IOMMU_PERF_EVENTS
- def_bool y
bool "Intel IOMMU performance events"
+ default y
depends on INTEL_IOMMU && PERF_EVENTS
help
Selecting this option will enable the performance monitoring
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index d7e10f1311..e4a03588a8 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -27,7 +27,6 @@
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
-#include "../iommu-sva.h"
#include "pasid.h"
#include "cap_audit.h"
#include "perfmon.h"
@@ -222,12 +221,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
-static int dmar_map_gfx = 1;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;
+static int disable_igfx_iommu;
-#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
@@ -266,7 +264,7 @@ static int __init intel_iommu_setup(char *str)
no_platform_optin = 1;
pr_info("IOMMU disabled\n");
} else if (!strncmp(str, "igfx_off", 8)) {
- dmar_map_gfx = 0;
+ disable_igfx_iommu = 1;
pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
@@ -1851,34 +1849,17 @@ static void domain_exit(struct dmar_domain *domain)
kfree(domain);
}
-/*
- * Get the PASID directory size for scalable mode context entry.
- * Value of X in the PDTS field of a scalable mode context entry
- * indicates PASID directory with 2^(X + 7) entries.
- */
-static unsigned long context_get_sm_pds(struct pasid_table *table)
-{
- unsigned long pds, max_pde;
-
- max_pde = table->max_pasid >> PASID_PDE_SHIFT;
- pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
- if (pds < 7)
- return 0;
-
- return pds - 7;
-}
-
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
- struct pasid_table *table,
u8 bus, u8 devfn)
{
struct device_domain_info *info =
domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
+ struct dma_pte *pgd = domain->pgd;
struct context_entry *context;
- int ret;
+ int agaw, ret;
if (hw_pass_through && domain_type_is_si(domain))
translation = CONTEXT_TT_PASS_THROUGH;
@@ -1921,65 +1902,37 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
context_clear_entry(context);
+ context_set_domain_id(context, did);
- if (sm_supported(iommu)) {
- unsigned long pds;
-
- /* Setup the PASID DIR pointer: */
- pds = context_get_sm_pds(table);
- context->lo = (u64)virt_to_phys(table->table) |
- context_pdts(pds);
-
- /* Setup the RID_PASID field: */
- context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
-
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
/*
- * Setup the Device-TLB enable bit and Page request
- * Enable bit:
+ * Skip top levels of page tables for iommu which has
+ * less agaw than default. Unnecessary for PT mode.
*/
- if (info && info->ats_supported)
- context_set_sm_dte(context);
- if (info && info->pri_supported)
- context_set_sm_pre(context);
- if (info && info->pasid_supported)
- context_set_pasid(context);
- } else {
- struct dma_pte *pgd = domain->pgd;
- int agaw;
-
- context_set_domain_id(context, did);
-
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- /*
- * Skip top levels of page tables for iommu which has
- * less agaw than default. Unnecessary for PT mode.
- */
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- ret = -ENOMEM;
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd))
- goto out_unlock;
- }
-
- if (info && info->ats_supported)
- translation = CONTEXT_TT_DEV_IOTLB;
- else
- translation = CONTEXT_TT_MULTI_LEVEL;
-
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, agaw);
- } else {
- /*
- * In pass through mode, AW must be programmed to
- * indicate the largest AGAW value supported by
- * hardware. And ASR is ignored by hardware.
- */
- context_set_address_width(context, iommu->msagaw);
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ ret = -ENOMEM;
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+ goto out_unlock;
}
- context_set_translation_type(context, translation);
+ if (info && info->ats_supported)
+ translation = CONTEXT_TT_DEV_IOTLB;
+ else
+ translation = CONTEXT_TT_MULTI_LEVEL;
+
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, agaw);
+ } else {
+ /*
+ * In pass through mode, AW must be programmed to
+ * indicate the largest AGAW value supported by
+ * hardware. And ASR is ignored by hardware.
+ */
+ context_set_address_width(context, iommu->msagaw);
}
+ context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
if (!ecap_coherent(iommu->ecap))
@@ -2009,43 +1962,29 @@ out_unlock:
return ret;
}
-struct domain_context_mapping_data {
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- struct pasid_table *table;
-};
-
static int domain_context_mapping_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{
- struct domain_context_mapping_data *data = opaque;
+ struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *domain = opaque;
- return domain_context_mapping_one(data->domain, data->iommu,
- data->table, PCI_BUS_NUM(alias),
- alias & 0xff);
+ return domain_context_mapping_one(domain, iommu,
+ PCI_BUS_NUM(alias), alias & 0xff);
}
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct domain_context_mapping_data data;
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
- struct pasid_table *table;
-
- table = intel_pasid_get_table(dev);
if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, table,
- bus, devfn);
-
- data.domain = domain;
- data.iommu = iommu;
- data.table = table;
+ return domain_context_mapping_one(domain, iommu, bus, devfn);
return pci_for_each_dma_alias(to_pci_dev(dev),
- &domain_context_mapping_cb, &data);
+ domain_context_mapping_cb, domain);
}
/* Returns a number of VTD pages, but aligned to MM page size */
@@ -2235,9 +2174,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
struct context_entry *context;
u16 did_old;
- if (!iommu)
- return;
-
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
@@ -2245,14 +2181,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
return;
}
- if (sm_supported(iommu)) {
- if (hw_pass_through && domain_type_is_si(info->domain))
- did_old = FLPT_DEFAULT_DID;
- else
- did_old = domain_id_iommu(info->domain, iommu);
- } else {
- did_old = context_domain_id(context);
- }
+ did_old = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
@@ -2263,9 +2192,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- if (sm_supported(iommu))
- qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
-
iommu->flush.flush_iotlb(iommu,
did_old,
0,
@@ -2405,28 +2331,19 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
- /* PASID table is mandatory for a PCI device in scalable mode. */
- if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
- /* Setup the PASID entry for requests without PASID: */
- if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu,
- dev, IOMMU_NO_PASID);
- else if (domain->use_first_level)
- ret = domain_setup_first_level(iommu, domain, dev,
- IOMMU_NO_PASID);
- else
- ret = intel_pasid_setup_second_level(iommu, domain,
- dev, IOMMU_NO_PASID);
- if (ret) {
- dev_err(dev, "Setup RID2PASID failed\n");
- device_block_translation(dev);
- return ret;
- }
- }
+ if (dev_is_real_dma_subdevice(dev))
+ return 0;
+
+ if (!sm_supported(iommu))
+ ret = domain_context_mapping(domain, dev);
+ else if (hw_pass_through && domain_type_is_si(domain))
+ ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
+ else if (domain->use_first_level)
+ ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID);
+ else
+ ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
- ret = domain_context_mapping(domain, dev);
if (ret) {
- dev_err(dev, "Domain context map failed\n");
device_block_translation(dev);
return ret;
}
@@ -2484,9 +2401,6 @@ static int device_def_domain_type(struct device *dev)
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return IOMMU_DOMAIN_IDENTITY;
-
- if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return IOMMU_DOMAIN_IDENTITY;
}
return 0;
@@ -2787,13 +2701,6 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
}
-#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- dmar_map_gfx = 0;
-#endif
-
- if (!dmar_map_gfx)
- iommu_identity_mapping |= IDENTMAP_GFX;
-
check_tylersburg_isoch();
ret = si_domain_init(hw_pass_through);
@@ -2884,7 +2791,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
- if (!dmar_map_gfx)
+ if (disable_igfx_iommu)
drhd->ignored = 1;
}
}
@@ -4078,6 +3985,10 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ context_copied(iommu, info->bus, info->devfn))
+ return intel_pasid_setup_sm_context(dev);
+
return 0;
}
@@ -4393,11 +4304,19 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
dev_err(dev, "PASID table allocation failed\n");
goto clear_rbtree;
}
+
+ if (!context_copied(iommu, info->bus, info->devfn)) {
+ ret = intel_pasid_setup_sm_context(dev);
+ if (ret)
+ goto free_table;
+ }
}
intel_iommu_debugfs_create_dev(info);
return &iommu->iommu;
+free_table:
+ intel_pasid_free_table(dev);
clear_rbtree:
device_rbtree_remove(info);
free:
@@ -4562,23 +4481,15 @@ static int intel_iommu_enable_iopf(struct device *dev)
if (ret)
return ret;
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret)
- goto iopf_remove_device;
-
ret = pci_enable_pri(pdev, PRQ_DEPTH);
- if (ret)
- goto iopf_unregister_handler;
+ if (ret) {
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
+ return ret;
+ }
+
info->pri_enabled = 1;
return 0;
-
-iopf_unregister_handler:
- iommu_unregister_device_fault_handler(dev);
-iopf_remove_device:
- iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- return ret;
}
static int intel_iommu_disable_iopf(struct device *dev)
@@ -4599,14 +4510,7 @@ static int intel_iommu_disable_iopf(struct device *dev)
*/
pci_disable_pri(to_pci_dev(dev));
info->pri_enabled = 0;
-
- /*
- * With PRI disabled and outstanding PRQs drained, unregistering
- * fault handler and removing device from iopf queue should never
- * fail.
- */
- WARN_ON(iommu_unregister_device_fault_handler(dev));
- WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
return 0;
}
@@ -4964,7 +4868,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
return;
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
+ disable_igfx_iommu = 1;
}
/* G4x/GM45 integrated gfx dmar support is totally busted. */
@@ -5045,8 +4949,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
- dmar_map_gfx = 0;
- } else if (dmar_map_gfx) {
+ disable_igfx_iommu = 1;
+ } else if (!disable_igfx_iommu) {
/* we have to ensure the gfx device is idle before we flush */
pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
iommu_set_dma_strict();
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index cd267ba64e..404d2476a8 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1096,8 +1096,8 @@ struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
int intel_svm_finish_prq(struct intel_iommu *iommu);
-int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
- struct iommu_page_response *msg);
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index a51e895d9a..11f0b856d7 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -734,3 +734,141 @@ void intel_pasid_teardown_sm_context(struct device *dev)
pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_teardown, dev);
}
+
+/*
+ * Get the PASID directory size for scalable mode context entry.
+ * Value of X in the PDTS field of a scalable mode context entry
+ * indicates PASID directory with 2^(X + 7) entries.
+ */
+static unsigned long context_get_sm_pds(struct pasid_table *table)
+{
+ unsigned long pds, max_pde;
+
+ max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+ pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
+ if (pds < 7)
+ return 0;
+
+ return pds - 7;
+}
+
+static int context_entry_set_pasid_table(struct context_entry *context,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct pasid_table *table = info->pasid_table;
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long pds;
+
+ context_clear_entry(context);
+
+ pds = context_get_sm_pds(table);
+ context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
+ context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
+
+ if (info->ats_supported)
+ context_set_sm_dte(context);
+ if (info->pri_supported)
+ context_set_sm_pre(context);
+ if (info->pasid_supported)
+ context_set_pasid(context);
+
+ context_set_fault_enable(context);
+ context_set_present(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+
+ return 0;
+}
+
+static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, true);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return -ENOMEM;
+ }
+
+ if (context_present(context) && !context_copied(iommu, bus, devfn)) {
+ spin_unlock(&iommu->lock);
+ return 0;
+ }
+
+ if (context_copied(iommu, bus, devfn)) {
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+
+ /*
+ * For kdump cases, old valid entries may be cached due to
+ * the in-flight DMA and copied pgtable, but there is no
+ * unmapping behaviour for them, thus we need explicit cache
+ * flushes for all affected domain IDs and PASIDs used in
+ * the copied PASID table. Given that we have no idea about
+ * which domain IDs and PASIDs were used in the copied tables,
+ * upgrade them to global PASID and IOTLB cache invalidation.
+ */
+ iommu->flush.flush_context(iommu, 0,
+ PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+
+ /*
+ * At this point, the device is supposed to finish reset at
+ * its driver probe stage, so no in-flight DMA will exist,
+	 * and we no longer need to worry about stale entries.
+ */
+ clear_context_copied(iommu, bus, devfn);
+ }
+
+ context_entry_set_pasid_table(context, dev);
+ spin_unlock(&iommu->lock);
+
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entry we don't need to flush the caches. If it does
+ * cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ }
+
+ return 0;
+}
+
+static int pci_pasid_table_setup(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev != &pdev->dev)
+ return 0;
+
+ return device_pasid_table_setup(dev, PCI_BUS_NUM(alias), alias & 0xff);
+}
+
+/*
+ * Set the device's PASID table to its context table entry.
+ *
+ * The PASID table is installed in the context entries of both the device
+ * itself and its alias requester ID for DMA.
+ */
+int intel_pasid_setup_sm_context(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev))
+ return device_pasid_table_setup(dev, info->bus, info->devfn);
+
+ return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
+}
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 42fda97fd8..da9978fef7 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -318,5 +318,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
+int intel_pasid_setup_sm_context(struct device *dev);
void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 94ee70ac38..adc4de6bbd 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -33,7 +33,7 @@ int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
spin_lock_irqsave(&latency_lock, flags);
if (!iommu->perf_statistic) {
- iommu->perf_statistic = kzalloc(sizeof(*lstat) * DMAR_LATENCY_NUM,
+ iommu->perf_statistic = kcalloc(DMAR_LATENCY_NUM, sizeof(*lstat),
GFP_ATOMIC);
if (!iommu->perf_statistic) {
ret = -ENOMEM;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4d269df008..ee3b469e2d 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -22,7 +22,6 @@
#include "iommu.h"
#include "pasid.h"
#include "perf.h"
-#include "../iommu-sva.h"
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
@@ -315,10 +314,11 @@ out:
return 0;
}
-static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
- struct iommu_domain *domain, ioasid_t pasid)
+static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
@@ -360,7 +360,6 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
sdev->iommu = iommu;
sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
- init_rcu_head(&sdev->rcu);
if (info->ats_enabled) {
sdev->qdep = info->ats_qdep;
if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
@@ -408,13 +407,6 @@ void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
if (svm->notifier.ops)
mmu_notifier_unregister(&svm->notifier, mm);
pasid_private_remove(svm->pasid);
- /*
- * We mandate that no page faults may be outstanding
- * for the PASID when intel_svm_unbind_mm() is called.
- * If that is not obeyed, subtle errors will happen.
- * Let's make them less subtle...
- */
- memset(svm, 0x6b, sizeof(*svm));
kfree(svm);
}
}
@@ -562,16 +554,12 @@ static int prq_to_iommu_prot(struct page_req_dsc *req)
return prot;
}
-static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
- struct page_req_dsc *desc)
+static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
+ struct page_req_dsc *desc)
{
- struct iommu_fault_event event;
-
- if (!dev || !dev_is_pci(dev))
- return -ENODEV;
+ struct iopf_fault event = { };
/* Fill in event data for device specific processing */
- memset(&event, 0, sizeof(struct iommu_fault_event));
event.fault.type = IOMMU_FAULT_PAGE_REQ;
event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
event.fault.prm.pasid = desc->pasid;
@@ -603,7 +591,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
}
- return iommu_report_device_fault(dev, &event);
+ iommu_report_device_fault(dev, &event);
}
static void handle_bad_prq_event(struct intel_iommu *iommu,
@@ -707,12 +695,10 @@ bad_req:
goto bad_req;
}
- if (intel_svm_prq_report(iommu, dev, req))
- handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
- else
- trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
- req->priv_data[0], req->priv_data[1],
- iommu->prq_seq_number++);
+ intel_svm_prq_report(iommu, dev, req);
+ trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
+ req->priv_data[0], req->priv_data[1],
+ iommu->prq_seq_number++);
mutex_unlock(&iommu->iopf_lock);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
@@ -743,9 +729,8 @@ prq_advance:
return IRQ_RETVAL(handled);
}
-int intel_svm_page_response(struct device *dev,
- struct iommu_fault_event *evt,
- struct iommu_page_response *msg)
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
@@ -754,7 +739,6 @@ int intel_svm_page_response(struct device *dev,
bool private_present;
bool pasid_present;
bool last_page;
- int ret = 0;
u16 sid;
prm = &evt->fault.prm;
@@ -763,16 +747,6 @@ int intel_svm_page_response(struct device *dev,
private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
- if (!pasid_present) {
- ret = -EINVAL;
- goto out;
- }
-
- if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
- ret = -EINVAL;
- goto out;
- }
-
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
@@ -801,17 +775,6 @@ int intel_svm_page_response(struct device *dev,
qi_submit_sync(iommu, &desc, 1, 0);
}
-out:
- return ret;
-}
-
-static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
-
- return intel_svm_bind_mm(iommu, dev, domain, pasid);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index e5b8b9110c..06d78fcc79 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -11,101 +11,140 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "iommu-sva.h"
+#include "iommu-priv.h"
-/**
- * struct iopf_queue - IO Page Fault queue
- * @wq: the fault workqueue
- * @devices: devices attached to this queue
- * @lock: protects the device list
+/*
+ * Return the fault parameter of a device if it exists. Otherwise, return NULL.
+ * On a successful return, the caller takes a reference of this parameter and
+ * should put it after use by calling iopf_put_dev_fault_param().
*/
-struct iopf_queue {
- struct workqueue_struct *wq;
- struct list_head devices;
- struct mutex lock;
-};
+static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev)
+{
+ struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *fault_param;
-/**
- * struct iopf_device_param - IO Page Fault data attached to a device
- * @dev: the device that owns this param
- * @queue: IOPF queue
- * @queue_list: index into queue->devices
- * @partial: faults that are part of a Page Request Group for which the last
- * request hasn't been submitted yet.
- */
-struct iopf_device_param {
- struct device *dev;
- struct iopf_queue *queue;
- struct list_head queue_list;
- struct list_head partial;
-};
-
-struct iopf_fault {
- struct iommu_fault fault;
- struct list_head list;
-};
-
-struct iopf_group {
- struct iopf_fault last_fault;
- struct list_head faults;
- struct work_struct work;
- struct device *dev;
-};
-
-static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
- enum iommu_page_response_code status)
+ rcu_read_lock();
+ fault_param = rcu_dereference(param->fault_param);
+ if (fault_param && !refcount_inc_not_zero(&fault_param->users))
+ fault_param = NULL;
+ rcu_read_unlock();
+
+ return fault_param;
+}
+
+/* Caller must hold a reference of the fault parameter. */
+static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
{
- struct iommu_page_response resp = {
- .version = IOMMU_PAGE_RESP_VERSION_1,
- .pasid = iopf->fault.prm.pasid,
- .grpid = iopf->fault.prm.grpid,
- .code = status,
- };
+ if (refcount_dec_and_test(&fault_param->users))
+ kfree_rcu(fault_param, rcu);
+}
- if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
- (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
- resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+static void __iopf_free_group(struct iopf_group *group)
+{
+ struct iopf_fault *iopf, *next;
- return iommu_page_response(dev, &resp);
+ list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+ kfree(iopf);
+ }
+
+ /* Pair with iommu_report_device_fault(). */
+ iopf_put_dev_fault_param(group->fault_param);
}
-static void iopf_handler(struct work_struct *work)
+void iopf_free_group(struct iopf_group *group)
+{
+ __iopf_free_group(group);
+ kfree(group);
+}
+EXPORT_SYMBOL_GPL(iopf_free_group);
+
+static struct iommu_domain *get_domain_for_iopf(struct device *dev,
+ struct iommu_fault *fault)
{
- struct iopf_group *group;
struct iommu_domain *domain;
- struct iopf_fault *iopf, *next;
- enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
- group = container_of(work, struct iopf_group, work);
- domain = iommu_get_domain_for_dev_pasid(group->dev,
- group->last_fault.fault.prm.pasid, 0);
- if (!domain || !domain->iopf_handler)
- status = IOMMU_PAGE_RESP_INVALID;
+ if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+ domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0);
+ if (IS_ERR(domain))
+ domain = NULL;
+ } else {
+ domain = iommu_get_domain_for_dev(dev);
+ }
- list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ if (!domain || !domain->iopf_handler) {
+ dev_warn_ratelimited(dev,
+ "iopf (pasid %d) without domain attached or handler installed\n",
+ fault->prm.pasid);
+
+ return NULL;
+ }
+
+ return domain;
+}
+
+/* Non-last request of a group. Postpone until the last one. */
+static int report_partial_fault(struct iommu_fault_param *fault_param,
+ struct iommu_fault *fault)
+{
+ struct iopf_fault *iopf;
+
+ iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
+ if (!iopf)
+ return -ENOMEM;
+
+ iopf->fault = *fault;
+
+ mutex_lock(&fault_param->lock);
+ list_add(&iopf->list, &fault_param->partial);
+ mutex_unlock(&fault_param->lock);
+
+ return 0;
+}
+
+static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
+ struct iopf_fault *evt,
+ struct iopf_group *abort_group)
+{
+ struct iopf_fault *iopf, *next;
+ struct iopf_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
/*
- * For the moment, errors are sticky: don't handle subsequent
- * faults in the group if there is an error.
+ * We always need to construct the group as we need it to abort
+ * the request at the driver if it can't be handled.
*/
- if (status == IOMMU_PAGE_RESP_SUCCESS)
- status = domain->iopf_handler(&iopf->fault,
- domain->fault_data);
+ group = abort_group;
+ }
- if (!(iopf->fault.prm.flags &
- IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
- kfree(iopf);
+ group->fault_param = iopf_param;
+ group->last_fault.fault = evt->fault;
+ INIT_LIST_HEAD(&group->faults);
+ INIT_LIST_HEAD(&group->pending_node);
+ list_add(&group->last_fault.list, &group->faults);
+
+ /* See if we have partial faults for this group */
+ mutex_lock(&iopf_param->lock);
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+ if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
+ /* Insert *before* the last fault */
+ list_move(&iopf->list, &group->faults);
}
+ list_add(&group->pending_node, &iopf_param->faults);
+ mutex_unlock(&iopf_param->lock);
- iopf_complete_group(group->dev, &group->last_fault, status);
- kfree(group);
+ return group;
}
/**
- * iommu_queue_iopf - IO Page Fault handler
- * @fault: fault event
- * @cookie: struct device, passed to iommu_register_device_fault_handler.
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
*
- * Add a fault to the device workqueue, to be handled by mm.
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. If this function fails then ops->page_response() was called to
+ * complete evt if required.
*
* This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
* them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
@@ -137,83 +176,57 @@ static void iopf_handler(struct work_struct *work)
* freed after the device has stopped generating page faults (or the iommu
* hardware has been set to block the page faults) and the pending page faults
* have been flushed.
- *
- * Return: 0 on success and <0 on error.
*/
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
- int ret;
+ struct iommu_fault *fault = &evt->fault;
+ struct iommu_fault_param *iopf_param;
+ struct iopf_group abort_group = {};
struct iopf_group *group;
- struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
- struct device *dev = cookie;
- struct dev_iommu *param = dev->iommu;
-
- lockdep_assert_held(&param->lock);
-
- if (fault->type != IOMMU_FAULT_PAGE_REQ)
- /* Not a recoverable page fault */
- return -EOPNOTSUPP;
-
- /*
- * As long as we're holding param->lock, the queue can't be unlinked
- * from the device and therefore cannot disappear.
- */
- iopf_param = param->iopf_param;
- if (!iopf_param)
- return -ENODEV;
+ iopf_param = iopf_get_dev_fault_param(dev);
+ if (WARN_ON(!iopf_param))
+ return;
if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
- iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
- if (!iopf)
- return -ENOMEM;
-
- iopf->fault = *fault;
-
- /* Non-last request of a group. Postpone until the last one */
- list_add(&iopf->list, &iopf_param->partial);
-
- return 0;
+ report_partial_fault(iopf_param, fault);
+ iopf_put_dev_fault_param(iopf_param);
+ /* A request that is not the last does not need to be ack'd */
}
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group) {
- /*
- * The caller will send a response to the hardware. But we do
- * need to clean up before leaving, otherwise partial faults
- * will be stuck.
- */
- ret = -ENOMEM;
- goto cleanup_partial;
- }
+ /*
+ * This is the last page fault of a group. Allocate an iopf group and
+ * pass it to domain's page fault handler. The group holds a reference
+ * count of the fault parameter. It will be released after response or
+ * error path of this function. If an error is returned, the caller
+ * will send a response to the hardware. We need to clean up before
+ * leaving, otherwise partial faults will be stuck.
+ */
+ group = iopf_group_alloc(iopf_param, evt, &abort_group);
+ if (group == &abort_group)
+ goto err_abort;
- group->dev = dev;
- group->last_fault.fault = *fault;
- INIT_LIST_HEAD(&group->faults);
- list_add(&group->last_fault.list, &group->faults);
- INIT_WORK(&group->work, iopf_handler);
+ group->domain = get_domain_for_iopf(dev, fault);
+ if (!group->domain)
+ goto err_abort;
- /* See if we have partial faults for this group */
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
- if (iopf->fault.prm.grpid == fault->prm.grpid)
- /* Insert *before* the last fault */
- list_move(&iopf->list, &group->faults);
- }
+ /*
+ * On success iopf_handler must call iopf_group_response() and
+ * iopf_free_group()
+ */
+ if (group->domain->iopf_handler(group))
+ goto err_abort;
- queue_work(iopf_param->queue->wq, &group->work);
- return 0;
+ return;
-cleanup_partial:
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
- if (iopf->fault.prm.grpid == fault->prm.grpid) {
- list_del(&iopf->list);
- kfree(iopf);
- }
- }
- return ret;
+err_abort:
+ iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
+ if (group == &abort_group)
+ __iopf_free_group(group);
+ else
+ iopf_free_group(group);
}
-EXPORT_SYMBOL_GPL(iommu_queue_iopf);
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
/**
* iopf_queue_flush_dev - Ensure that all queued faults have been processed
@@ -229,26 +242,52 @@ EXPORT_SYMBOL_GPL(iommu_queue_iopf);
*/
int iopf_queue_flush_dev(struct device *dev)
{
- int ret = 0;
- struct iopf_device_param *iopf_param;
- struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *iopf_param;
- if (!param)
+ /*
+ * It's a driver bug to be here after iopf_queue_remove_device().
+ * Therefore, it's safe to dereference the fault parameter without
+ * holding the lock.
+ */
+ iopf_param = rcu_dereference_check(dev->iommu->fault_param, true);
+ if (WARN_ON(!iopf_param))
return -ENODEV;
- mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
- if (iopf_param)
- flush_workqueue(iopf_param->queue->wq);
- else
- ret = -ENODEV;
- mutex_unlock(&param->lock);
+ flush_workqueue(iopf_param->queue->wq);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
/**
+ * iopf_group_response - Respond to a group of page faults
+ * @group: the group of faults with the same group id
+ * @status: the response code
+ */
+void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status)
+{
+ struct iommu_fault_param *fault_param = group->fault_param;
+ struct iopf_fault *iopf = &group->last_fault;
+ struct device *dev = group->fault_param->dev;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_page_response resp = {
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = status,
+ };
+
+ /* Only send response if there is a fault report pending */
+ mutex_lock(&fault_param->lock);
+ if (!list_empty(&group->pending_node)) {
+ ops->page_response(dev, &group->last_fault, &resp);
+ list_del_init(&group->pending_node);
+ }
+ mutex_unlock(&fault_param->lock);
+}
+EXPORT_SYMBOL_GPL(iopf_group_response);
+
+/**
* iopf_queue_discard_partial - Remove all pending partial fault
* @queue: the queue whose partial faults need to be discarded
*
@@ -261,18 +300,20 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iommu_fault_param *iopf_param;
if (!queue)
return -EINVAL;
mutex_lock(&queue->lock);
list_for_each_entry(iopf_param, &queue->devices, queue_list) {
+ mutex_lock(&iopf_param->lock);
list_for_each_entry_safe(iopf, next, &iopf_param->partial,
list) {
list_del(&iopf->list);
kfree(iopf);
}
+ mutex_unlock(&iopf_param->lock);
}
mutex_unlock(&queue->lock);
return 0;
@@ -288,34 +329,42 @@ EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
*/
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EBUSY;
- struct iopf_device_param *iopf_param;
+ int ret = 0;
struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *fault_param;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (!param)
+ if (!ops->page_response)
return -ENODEV;
- iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
- if (!iopf_param)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&iopf_param->partial);
- iopf_param->queue = queue;
- iopf_param->dev = dev;
-
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- if (!param->iopf_param) {
- list_add(&iopf_param->queue_list, &queue->devices);
- param->iopf_param = iopf_param;
- ret = 0;
+ if (rcu_dereference_check(param->fault_param,
+ lockdep_is_held(&param->lock))) {
+ ret = -EBUSY;
+ goto done_unlock;
+ }
+
+ fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
+ if (!fault_param) {
+ ret = -ENOMEM;
+ goto done_unlock;
}
+
+ mutex_init(&fault_param->lock);
+ INIT_LIST_HEAD(&fault_param->faults);
+ INIT_LIST_HEAD(&fault_param->partial);
+ fault_param->dev = dev;
+ refcount_set(&fault_param->users, 1);
+ list_add(&fault_param->queue_list, &queue->devices);
+ fault_param->queue = queue;
+
+ rcu_assign_pointer(param->fault_param, fault_param);
+
+done_unlock:
mutex_unlock(&param->lock);
mutex_unlock(&queue->lock);
- if (ret)
- kfree(iopf_param);
-
return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);
@@ -325,40 +374,66 @@ EXPORT_SYMBOL_GPL(iopf_queue_add_device);
* @queue: IOPF queue
* @dev: device to remove
*
- * Caller makes sure that no more faults are reported for this device.
+ * Remove a device from an iopf_queue. It's recommended to follow these
+ * steps when removing a device:
*
- * Return: 0 on success and <0 on error.
+ * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
+ * and flush any hardware page request queues. This should be done before
+ * calling into this helper.
+ * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
+ * page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
+ * not retry. This helper function handles this.
+ * - Disable PRI on the device: After calling this helper, the caller could
+ * then disable PRI on the device.
+ *
+ * Calling iopf_queue_remove_device() essentially disassociates the device.
+ * The fault_param might still exist, but iommu_page_response() will do
+ * nothing. The device fault parameter reference count has been properly
+ * passed from iommu_report_device_fault() to the fault handling work, and
+ * will eventually be released after iommu_page_response().
*/
-int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EINVAL;
- struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iopf_fault *partial_iopf;
+ struct iopf_fault *next;
+ struct iopf_group *group, *temp;
struct dev_iommu *param = dev->iommu;
-
- if (!param || !queue)
- return -EINVAL;
+ struct iommu_fault_param *fault_param;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
- if (iopf_param && iopf_param->queue == queue) {
- list_del(&iopf_param->queue_list);
- param->iopf_param = NULL;
- ret = 0;
+ fault_param = rcu_dereference_check(param->fault_param,
+ lockdep_is_held(&param->lock));
+
+ if (WARN_ON(!fault_param || fault_param->queue != queue))
+ goto unlock;
+
+ mutex_lock(&fault_param->lock);
+ list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list)
+ kfree(partial_iopf);
+
+ list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) {
+ struct iopf_fault *iopf = &group->last_fault;
+ struct iommu_page_response resp = {
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = IOMMU_PAGE_RESP_INVALID
+ };
+
+ ops->page_response(dev, iopf, &resp);
+ list_del_init(&group->pending_node);
}
- mutex_unlock(&param->lock);
- mutex_unlock(&queue->lock);
- if (ret)
- return ret;
+ mutex_unlock(&fault_param->lock);
- /* Just in case some faults are still stuck */
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
- kfree(iopf);
+ list_del(&fault_param->queue_list);
- kfree(iopf_param);
-
- return 0;
+ /* dec the ref owned by iopf_queue_add_device() */
+ rcu_assign_pointer(param->fault_param, NULL);
+ iopf_put_dev_fault_param(fault_param);
+unlock:
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
@@ -404,7 +479,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_alloc);
*/
void iopf_queue_free(struct iopf_queue *queue)
{
- struct iopf_device_param *iopf_param, *next;
+ struct iommu_fault_param *iopf_param, *next;
if (!queue)
return;
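
The following is an illustrative sketch, not part of the diff: it shows how an IOMMU driver could feed its hardware page-request queue into the reworked io-pgfault core. The foo_* names and the foo_prq_entry layout are hypothetical; only the iopf_queue_*() helpers, iommu_report_device_fault() and the iommu_fault field names come from the code above.

/*
 * Hypothetical "foo" driver glue for the reworked io-pgfault core.
 * foo_prq_entry stands in for whatever format the hardware PRI queue
 * uses; everything else follows the io-pgfault.c API above.
 */
#include <linux/iommu.h>

struct foo_prq_entry {
	u32	pasid;
	u32	grpid;
	u64	addr;
	bool	pasid_valid;
	bool	last;
};

static struct iopf_queue *foo_iopf_queue;

static int foo_enable_iopf(struct device *dev)
{
	/* One workqueue-backed queue shared by all faulting devices. */
	if (!foo_iopf_queue)
		foo_iopf_queue = iopf_queue_alloc("foo-iopf");
	if (!foo_iopf_queue)
		return -ENOMEM;

	/* Installs dev->iommu->fault_param; -EBUSY if already present. */
	return iopf_queue_add_device(foo_iopf_queue, dev);
}

static void foo_handle_prq_entry(struct device *dev,
				 const struct foo_prq_entry *e)
{
	struct iopf_fault evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.pasid = e->pasid,
				.grpid = e->grpid,
				.addr  = e->addr,
			},
		},
	};

	if (e->pasid_valid)
		evt.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	if (e->last)
		evt.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	/*
	 * Non-last requests are stashed on the device's fault_param; the
	 * last request of a group becomes an iopf_group that is handed to
	 * the attached domain's iopf_handler, or aborted with
	 * IOMMU_PAGE_RESP_FAILURE if no handler can take it.
	 */
	iommu_report_device_fault(dev, &evt);
}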
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index 2024a23133..5f731d9948 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -21,10 +21,11 @@ int iommu_group_replace_domain(struct iommu_group *group,
struct iommu_domain *new_domain);
int iommu_device_register_bus(struct iommu_device *iommu,
- const struct iommu_ops *ops, struct bus_type *bus,
+ const struct iommu_ops *ops,
+ const struct bus_type *bus,
struct notifier_block *nb);
void iommu_device_unregister_bus(struct iommu_device *iommu,
- struct bus_type *bus,
+ const struct bus_type *bus,
struct notifier_block *nb);
#endif /* __LINUX_IOMMU_PRIV_H */
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 65814cbc84..640acc804e 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -7,7 +7,7 @@
#include <linux/sched/mm.h>
#include <linux/iommu.h>
-#include "iommu-sva.h"
+#include "iommu-priv.h"
static DEFINE_MUTEX(iommu_sva_lock);
@@ -176,15 +176,25 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+void mm_pasid_drop(struct mm_struct *mm)
+{
+ struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+
+ if (!iommu_mm)
+ return;
+
+ iommu_free_global_pasid(iommu_mm->pasid);
+ kfree(iommu_mm);
+}
+
/*
* I/O page fault handler for SVA
*/
-enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
+static enum iommu_page_response_code
+iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
vm_fault_t ret;
struct vm_area_struct *vma;
- struct mm_struct *mm = data;
unsigned int access_flags = 0;
unsigned int fault_flags = FAULT_FLAG_REMOTE;
struct iommu_fault_page_request *prm = &fault->prm;
@@ -234,13 +244,54 @@ out_put_mm:
return status;
}
-void mm_pasid_drop(struct mm_struct *mm)
+static void iommu_sva_handle_iopf(struct work_struct *work)
{
- struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+ struct iopf_fault *iopf;
+ struct iopf_group *group;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
+
+ group = container_of(work, struct iopf_group, work);
+ list_for_each_entry(iopf, &group->faults, list) {
+ /*
+ * For the moment, errors are sticky: don't handle subsequent
+ * faults in the group if there is an error.
+ */
+ if (status != IOMMU_PAGE_RESP_SUCCESS)
+ break;
+
+ status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
+ }
- if (!iommu_mm)
- return;
+ iopf_group_response(group, status);
+ iopf_free_group(group);
+}
- iommu_free_global_pasid(iommu_mm->pasid);
- kfree(iommu_mm);
+static int iommu_sva_iopf_handler(struct iopf_group *group)
+{
+ struct iommu_fault_param *fault_param = group->fault_param;
+
+ INIT_WORK(&group->work, iommu_sva_handle_iopf);
+ if (!queue_work(fault_param->queue->wq, &group->work))
+ return -EBUSY;
+
+ return 0;
+}
+
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *domain;
+
+ domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+ if (!domain)
+ return NULL;
+
+ domain->type = IOMMU_DOMAIN_SVA;
+ mmgrab(mm);
+ domain->mm = mm;
+ domain->owner = ops;
+ domain->iopf_handler = iommu_sva_iopf_handler;
+
+ return domain;
}
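
Another illustrative sketch, not part of the diff: the contract a custom domain->iopf_handler now has to follow, mirroring the SVA handler above. Every group the handler accepts must eventually be completed with iopf_group_response() and released with iopf_free_group(). The foo_* names and the trivial foo_resolve_fault() placeholder are hypothetical.

#include <linux/iommu.h>
#include <linux/workqueue.h>

/* Placeholder for the real fault-resolution step; assumed to exist. */
static int foo_resolve_fault(struct iommu_domain *domain,
			     struct iommu_fault *fault)
{
	return 0;	/* pretend every page request can be satisfied */
}

static void foo_iopf_work(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);
	struct iopf_fault *iopf;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	list_for_each_entry(iopf, &group->faults, list) {
		/* Errors are sticky: stop handling the group on failure. */
		if (foo_resolve_fault(group->domain, &iopf->fault)) {
			status = IOMMU_PAGE_RESP_INVALID;
			break;
		}
	}

	/* Both calls are mandatory once the handler has accepted the group. */
	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int foo_iopf_handler(struct iopf_group *group)
{
	/* Called from iommu_report_device_fault(); defer the heavy work. */
	INIT_WORK(&group->work, foo_iopf_work);
	if (!queue_work(group->fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}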
diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h
deleted file mode 100644
index 54946b5a7c..0000000000
--- a/drivers/iommu/iommu-sva.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SVA library for IOMMU drivers
- */
-#ifndef _IOMMU_SVA_H
-#define _IOMMU_SVA_H
-
-#include <linux/mm_types.h>
-
-/* I/O Page fault */
-struct device;
-struct iommu_fault;
-struct iopf_queue;
-
-#ifdef CONFIG_IOMMU_SVA
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
-
-int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
-int iopf_queue_remove_device(struct iopf_queue *queue,
- struct device *dev);
-int iopf_queue_flush_dev(struct device *dev);
-struct iopf_queue *iopf_queue_alloc(const char *name);
-void iopf_queue_free(struct iopf_queue *queue);
-int iopf_queue_discard_partial(struct iopf_queue *queue);
-enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
-
-#else /* CONFIG_IOMMU_SVA */
-static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_add_device(struct iopf_queue *queue,
- struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_remove_device(struct iopf_queue *queue,
- struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_flush_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline struct iopf_queue *iopf_queue_alloc(const char *name)
-{
- return NULL;
-}
-
-static inline void iopf_queue_free(struct iopf_queue *queue)
-{
-}
-
-static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
-{
- return -ENODEV;
-}
-
-static inline enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
-{
- return IOMMU_PAGE_RESP_INVALID;
-}
-#endif /* CONFIG_IOMMU_SVA */
-#endif /* _IOMMU_SVA_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e606d250d1..659a77f7bb 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -36,8 +36,6 @@
#include "dma-iommu.h"
#include "iommu-priv.h"
-#include "iommu-sva.h"
-
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);
@@ -291,7 +289,7 @@ EXPORT_SYMBOL_GPL(iommu_device_unregister);
#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
void iommu_device_unregister_bus(struct iommu_device *iommu,
- struct bus_type *bus,
+ const struct bus_type *bus,
struct notifier_block *nb)
{
bus_unregister_notifier(bus, nb);
@@ -305,7 +303,8 @@ EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
* some memory to hold a notifier_block.
*/
int iommu_device_register_bus(struct iommu_device *iommu,
- const struct iommu_ops *ops, struct bus_type *bus,
+ const struct iommu_ops *ops,
+ const struct bus_type *bus,
struct notifier_block *nb)
{
int err;
@@ -1259,6 +1258,25 @@ void iommu_group_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
+/**
+ * iommu_group_mutex_assert - Check device group mutex lock
+ * @dev: the device that has group param set
+ *
+ * This function is called by an iommu driver to check whether it holds
+ * group mutex lock for the given device or not.
+ *
+ * Note that this function must be called after device group param is set.
+ */
+void iommu_group_mutex_assert(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+
+ lockdep_assert_held(&group->mutex);
+}
+EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
+#endif
+
static struct device *iommu_group_first_dev(struct iommu_group *group)
{
lockdep_assert_held(&group->mutex);
@@ -1342,217 +1360,6 @@ void iommu_group_put(struct iommu_group *group)
EXPORT_SYMBOL_GPL(iommu_group_put);
/**
- * iommu_register_device_fault_handler() - Register a device fault handler
- * @dev: the device
- * @handler: the fault handler
- * @data: private data passed as argument to the handler
- *
- * When an IOMMU fault event is received, this handler gets called with the
- * fault event and data as argument. The handler should return 0 on success. If
- * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
- * complete the fault by calling iommu_page_response() with one of the following
- * response code:
- * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
- * - IOMMU_PAGE_RESP_INVALID: terminate the fault
- * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
- * page faults if possible.
- *
- * Return 0 if the fault handler was installed successfully, or an error.
- */
-int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data)
-{
- struct dev_iommu *param = dev->iommu;
- int ret = 0;
-
- if (!param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
- /* Only allow one fault handler registered for each device */
- if (param->fault_param) {
- ret = -EBUSY;
- goto done_unlock;
- }
-
- get_device(dev);
- param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
- if (!param->fault_param) {
- put_device(dev);
- ret = -ENOMEM;
- goto done_unlock;
- }
- param->fault_param->handler = handler;
- param->fault_param->data = data;
- mutex_init(&param->fault_param->lock);
- INIT_LIST_HEAD(&param->fault_param->faults);
-
-done_unlock:
- mutex_unlock(&param->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
-
-/**
- * iommu_unregister_device_fault_handler() - Unregister the device fault handler
- * @dev: the device
- *
- * Remove the device fault handler installed with
- * iommu_register_device_fault_handler().
- *
- * Return 0 on success, or an error.
- */
-int iommu_unregister_device_fault_handler(struct device *dev)
-{
- struct dev_iommu *param = dev->iommu;
- int ret = 0;
-
- if (!param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
-
- if (!param->fault_param)
- goto unlock;
-
- /* we cannot unregister handler if there are pending faults */
- if (!list_empty(&param->fault_param->faults)) {
- ret = -EBUSY;
- goto unlock;
- }
-
- kfree(param->fault_param);
- param->fault_param = NULL;
- put_device(dev);
-unlock:
- mutex_unlock(&param->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
-
-/**
- * iommu_report_device_fault() - Report fault event to device driver
- * @dev: the device
- * @evt: fault event data
- *
- * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler. When this function fails and the fault is recoverable, it is the
- * caller's responsibility to complete the fault.
- *
- * Return 0 on success, or an error.
- */
-int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
-{
- struct dev_iommu *param = dev->iommu;
- struct iommu_fault_event *evt_pending = NULL;
- struct iommu_fault_param *fparam;
- int ret = 0;
-
- if (!param || !evt)
- return -EINVAL;
-
- /* we only report device fault if there is a handler registered */
- mutex_lock(&param->lock);
- fparam = param->fault_param;
- if (!fparam || !fparam->handler) {
- ret = -EINVAL;
- goto done_unlock;
- }
-
- if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
- (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
- evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
- GFP_KERNEL);
- if (!evt_pending) {
- ret = -ENOMEM;
- goto done_unlock;
- }
- mutex_lock(&fparam->lock);
- list_add_tail(&evt_pending->list, &fparam->faults);
- mutex_unlock(&fparam->lock);
- }
-
- ret = fparam->handler(&evt->fault, fparam->data);
- if (ret && evt_pending) {
- mutex_lock(&fparam->lock);
- list_del(&evt_pending->list);
- mutex_unlock(&fparam->lock);
- kfree(evt_pending);
- }
-done_unlock:
- mutex_unlock(&param->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_report_device_fault);
-
-int iommu_page_response(struct device *dev,
- struct iommu_page_response *msg)
-{
- bool needs_pasid;
- int ret = -EINVAL;
- struct iommu_fault_event *evt;
- struct iommu_fault_page_request *prm;
- struct dev_iommu *param = dev->iommu;
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
-
- if (!ops->page_response)
- return -ENODEV;
-
- if (!param || !param->fault_param)
- return -EINVAL;
-
- if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
- msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
- return -EINVAL;
-
- /* Only send response if there is a fault report pending */
- mutex_lock(&param->fault_param->lock);
- if (list_empty(&param->fault_param->faults)) {
- dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
- goto done_unlock;
- }
- /*
- * Check if we have a matching page request pending to respond,
- * otherwise return -EINVAL
- */
- list_for_each_entry(evt, &param->fault_param->faults, list) {
- prm = &evt->fault.prm;
- if (prm->grpid != msg->grpid)
- continue;
-
- /*
- * If the PASID is required, the corresponding request is
- * matched using the group ID, the PASID valid bit and the PASID
- * value. Otherwise only the group ID matches request and
- * response.
- */
- needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
- if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
- continue;
-
- if (!needs_pasid && has_pasid) {
- /* No big deal, just clear it. */
- msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
- msg->pasid = 0;
- }
-
- ret = ops->page_response(dev, evt, msg);
- list_del(&evt->list);
- kfree(evt);
- break;
- }
-
-done_unlock:
- mutex_unlock(&param->fault_param->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_page_response);
-
-/**
* iommu_group_id - Return ID for a group
* @group: the group to ID
*
@@ -2997,7 +2804,7 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
const struct iommu_ops *ops = NULL;
struct iommu_device *iommu;
@@ -3048,7 +2855,7 @@ void iommu_fwspec_free(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);
-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
+int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
int i, new_num;
@@ -3652,26 +3459,6 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
-struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
- struct mm_struct *mm)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- struct iommu_domain *domain;
-
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return NULL;
-
- domain->type = IOMMU_DOMAIN_SVA;
- mmgrab(mm);
- domain->mm = mm;
- domain->owner = ops;
- domain->iopf_handler = iommu_sva_handle_iopf;
- domain->fault_data = mm;
-
- return domain;
-}
-
ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
int ret;
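
A small illustrative sketch, not part of the diff: how a driver-side path that relies on the core holding the group mutex might use the new iommu_group_mutex_assert() helper. The foo_* function is hypothetical, and a no-op stub is assumed to exist when lockdep is disabled.

#include <linux/iommu.h>

/* Hypothetical hook that is only valid under the device's group mutex. */
static void foo_sync_device_tables(struct device *dev)
{
	/*
	 * Warns (via lockdep) if the caller does not hold
	 * dev->iommu_group->mutex; compiled away otherwise.
	 */
	iommu_group_mutex_assert(dev);

	/* ... reprogram per-device state serialized by the group mutex ... */
}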
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d30e453d0f..d59d0ea2fd 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,24 +24,8 @@ static bool iova_rcache_insert(struct iova_domain *iovad,
static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
-static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
-
-unsigned long iova_rcache_range(void)
-{
- return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
-}
-
-static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
-{
- struct iova_domain *iovad;
-
- iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
-
- free_cpu_cached_iovas(cpu, iovad);
- return 0;
-}
-
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_global_cached_iovas(struct iova_domain *iovad);
static struct iova *to_iova(struct rb_node *node)
@@ -252,54 +236,6 @@ static void free_iova_mem(struct iova *iova)
kmem_cache_free(iova_cache, iova);
}
-int iova_cache_get(void)
-{
- mutex_lock(&iova_cache_mutex);
- if (!iova_cache_users) {
- int ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
- iova_cpuhp_dead);
- if (ret) {
- mutex_unlock(&iova_cache_mutex);
- pr_err("Couldn't register cpuhp handler\n");
- return ret;
- }
-
- iova_cache = kmem_cache_create(
- "iommu_iova", sizeof(struct iova), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!iova_cache) {
- cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
- mutex_unlock(&iova_cache_mutex);
- pr_err("Couldn't create iova cache\n");
- return -ENOMEM;
- }
- }
-
- iova_cache_users++;
- mutex_unlock(&iova_cache_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iova_cache_get);
-
-void iova_cache_put(void)
-{
- mutex_lock(&iova_cache_mutex);
- if (WARN_ON(!iova_cache_users)) {
- mutex_unlock(&iova_cache_mutex);
- return;
- }
- iova_cache_users--;
- if (!iova_cache_users) {
- cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
- kmem_cache_destroy(iova_cache);
- }
- mutex_unlock(&iova_cache_mutex);
-}
-EXPORT_SYMBOL_GPL(iova_cache_put);
-
/**
* alloc_iova - allocates an iova
* @iovad: - iova domain in question
@@ -654,11 +590,18 @@ struct iova_rcache {
struct delayed_work work;
};
+static struct kmem_cache *iova_magazine_cache;
+
+unsigned long iova_rcache_range(void)
+{
+ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
struct iova_magazine *mag;
- mag = kmalloc(sizeof(*mag), flags);
+ mag = kmem_cache_alloc(iova_magazine_cache, flags);
if (mag)
mag->size = 0;
@@ -667,7 +610,7 @@ static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
static void iova_magazine_free(struct iova_magazine *mag)
{
- kfree(mag);
+ kmem_cache_free(iova_magazine_cache, mag);
}
static void
@@ -990,5 +933,71 @@ static void free_global_cached_iovas(struct iova_domain *iovad)
spin_unlock_irqrestore(&rcache->lock, flags);
}
}
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct iova_domain *iovad;
+
+ iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+ free_cpu_cached_iovas(cpu, iovad);
+ return 0;
+}
+
+int iova_cache_get(void)
+{
+ int err = -ENOMEM;
+
+ mutex_lock(&iova_cache_mutex);
+ if (!iova_cache_users) {
+ iova_cache = kmem_cache_create("iommu_iova", sizeof(struct iova), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_cache)
+ goto out_err;
+
+ iova_magazine_cache = kmem_cache_create("iommu_iova_magazine",
+ sizeof(struct iova_magazine),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_magazine_cache)
+ goto out_err;
+
+ err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
+ NULL, iova_cpuhp_dead);
+ if (err) {
+ pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err));
+ goto out_err;
+ }
+ }
+
+ iova_cache_users++;
+ mutex_unlock(&iova_cache_mutex);
+
+ return 0;
+
+out_err:
+ kmem_cache_destroy(iova_cache);
+ kmem_cache_destroy(iova_magazine_cache);
+ mutex_unlock(&iova_cache_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (WARN_ON(!iova_cache_users)) {
+ mutex_unlock(&iova_cache_mutex);
+ return;
+ }
+ iova_cache_users--;
+ if (!iova_cache_users) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
+ kmem_cache_destroy(iova_cache);
+ kmem_cache_destroy(iova_magazine_cache);
+ }
+ mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
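
An illustrative sketch, not part of the diff: the reference-counted setup/teardown sequence an IOVA allocator user follows. With this change the first iova_cache_get() call creates both the iova and the new iova-magazine kmem_caches, and the last iova_cache_put() destroys them again. The foo_* wrappers and the 4K granule are hypothetical.

#include <linux/iova.h>
#include <linux/sizes.h>

static int foo_init_iova(struct iova_domain *iovad)
{
	int ret;

	/* First user creates "iommu_iova" and "iommu_iova_magazine". */
	ret = iova_cache_get();
	if (ret)
		return ret;

	/* 4K granule, allocations starting at PFN 1. */
	init_iova_domain(iovad, SZ_4K, 1);
	return 0;
}

static void foo_exit_iova(struct iova_domain *iovad)
{
	put_iova_domain(iovad);

	/* Last user tears both kmem_caches down again. */
	iova_cache_put();
}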
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ace1fc4bd3..b657cc0960 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -709,7 +709,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
}
static int ipmmu_init_platform_device(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *ipmmu_pdev;
@@ -773,7 +773,7 @@ static bool ipmmu_device_is_allowed(struct device *dev)
}
static int ipmmu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
if (!ipmmu_device_is_allowed(dev))
return -ENODEV;
@@ -1005,7 +1005,6 @@ static const struct of_device_id ipmmu_of_ids[] = {
static int ipmmu_probe(struct platform_device *pdev)
{
struct ipmmu_vmsa_device *mmu;
- struct resource *res;
int irq;
int ret;
@@ -1025,8 +1024,7 @@ static int ipmmu_probe(struct platform_device *pdev)
return ret;
/* Map I/O memory and request IRQ. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmu->base = devm_ioremap_resource(&pdev->dev, res);
+ mmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmu->base))
return PTR_ERR(mmu->base);
@@ -1123,7 +1121,6 @@ static void ipmmu_remove(struct platform_device *pdev)
ipmmu_device_reset(mmu);
}
-#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
@@ -1153,18 +1150,14 @@ static int ipmmu_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops ipmmu_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
-#define DEV_PM_OPS &ipmmu_pm
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
- .of_match_table = of_match_ptr(ipmmu_of_ids),
- .pm = DEV_PM_OPS,
+ .of_match_table = ipmmu_of_ids,
+ .pm = pm_sleep_ptr(&ipmmu_pm),
},
.probe = ipmmu_probe,
.remove_new = ipmmu_remove,
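
An illustrative sketch, not part of the diff: the probe and system-sleep pattern the ipmmu-vmsa changes converge on, applied to a hypothetical "foo" platform driver. devm_platform_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair, and NOIRQ_SYSTEM_SLEEP_PM_OPS() with pm_sleep_ptr() replaces the CONFIG_PM_SLEEP #ifdef dance.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Map the first MEM resource; errors propagate as ERR_PTR values. */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* Restore hardware state lost over suspend. */
	return 0;
}

static const struct dev_pm_ops foo_pm = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, foo_resume_noirq)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-iommu",
		/* Referenced, then dead-code eliminated without PM_SLEEP. */
		.pm = pm_sleep_ptr(&foo_pm),
	},
	.probe = foo_probe,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");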
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f86af9815d..989e0869d8 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -598,7 +598,7 @@ static void print_ctx_regs(void __iomem *base, int ctx)
static int insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
int sid;
@@ -626,7 +626,7 @@ static int insert_iommu_master(struct device *dev,
}
static int qcom_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 51d0eba8cb..6a2707fe7a 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -957,7 +957,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
return group;
}
-static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int mtk_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct platform_device *m4updev;
@@ -1264,7 +1265,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->plat_data = of_device_get_match_data(dev);
/* Protect memory. HW will access here while translation fault.*/
- protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
+ protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN, GFP_KERNEL);
if (!protect)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 32cc8341d3..d6e4002200 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -398,7 +398,8 @@ static const struct iommu_ops mtk_iommu_v1_ops;
* MTK generation one iommu HW only support one iommu domain, and all the client
* sharing the same iova address space.
*/
-static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
+static int mtk_iommu_v1_create_mapping(struct device *dev,
+ const struct of_phandle_args *args)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_v1_data *data;
@@ -622,8 +623,8 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
data->dev = dev;
/* Protect memory. HW will access here while translation fault.*/
- protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
- GFP_KERNEL | GFP_DMA);
+ protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN,
+ GFP_KERNEL | GFP_DMA);
if (!protect)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 719652b608..3afe0b48a4 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -29,7 +29,7 @@ static int of_iommu_xlate(struct device *dev,
!of_device_is_available(iommu_spec->np))
return -ENODEV;
- ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+ ret = iommu_fwspec_init(dev, fwnode, ops);
if (ret)
return ret;
/*
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 2685861c0a..da79d9f4cf 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1140,7 +1140,7 @@ static void rk_iommu_release_device(struct device *dev)
}
static int rk_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_dev;
struct rk_iommudata *data;
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 537359f109..ba53571a82 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -390,7 +390,8 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int sprd_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct platform_device *pdev;
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 41484a5a39..decd52cba9 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -819,7 +819,7 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
}
static int sun50i_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
unsigned id = args->args[0];
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 310871728a..14e525bd0d 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -830,7 +830,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
}
static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
const struct iommu_ops *ops = smmu->iommu.ops;
int err;
@@ -959,7 +959,7 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev)
}
static int tegra_smmu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 34db37fd96..04048f64a2 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1051,7 +1051,8 @@ static struct iommu_group *viommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int viommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
}
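
Finally, an illustrative sketch, not part of the diff: the of_xlate shape that the drivers above converge on after the constification. The of_phandle_args pointer is now const all the way down, and iommu_fwspec_add_ids() accepts the device-tree argument array directly since it also takes const u32 *. The foo_* name and the one-cell binding are hypothetical.

#include <linux/iommu.h>
#include <linux/of.h>

static int foo_iommu_of_xlate(struct device *dev,
			      const struct of_phandle_args *args)
{
	/* Assume a binding with #iommu-cells = <1>: one stream ID per master. */
	if (args->args_count != 1)
		return -EINVAL;

	return iommu_fwspec_add_ids(dev, args->args, 1);
}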