path: root/drivers/iommu/arm
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/iommu/arm
parent    Initial commit. (diff)
download  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/iommu/arm')
-rw-r--r--  drivers/iommu/arm/Makefile                              2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/Makefile                  5
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c       597
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c          3953
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h           813
-rw-r--r--  drivers/iommu/arm/arm-smmu/Makefile                     6
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-impl.c            229
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c          345
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c       51
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c            579
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h             39
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c                2304
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.h                 534
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c               915
14 files changed, 10372 insertions, 0 deletions
diff --git a/drivers/iommu/arm/Makefile b/drivers/iommu/arm/Makefile
new file mode 100644
index 0000000000..0f9efeab70
--- /dev/null
+++ b/drivers/iommu/arm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += arm-smmu/ arm-smmu-v3/
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
new file mode 100644
index 0000000000..54feb1eccc
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
+arm_smmu_v3-objs-y += arm-smmu-v3.o
+arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
+arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
new file mode 100644
index 0000000000..8a16cd3ef4
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the IOMMU SVA API for the ARM SMMUv3
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+
+#include "arm-smmu-v3.h"
+#include "../../iommu-sva.h"
+#include "../../io-pgtable-arm.h"
+
+struct arm_smmu_mmu_notifier {
+ struct mmu_notifier mn;
+ struct arm_smmu_ctx_desc *cd;
+ bool cleared;
+ refcount_t refs;
+ struct list_head list;
+ struct arm_smmu_domain *domain;
+};
+
+#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
+
+struct arm_smmu_bond {
+ struct iommu_sva sva;
+ struct mm_struct *mm;
+ struct arm_smmu_mmu_notifier *smmu_mn;
+ struct list_head list;
+ refcount_t refs;
+};
+
+#define sva_to_bond(handle) \
+ container_of(handle, struct arm_smmu_bond, sva)
+
+static DEFINE_MUTEX(sva_lock);
+
+/*
+ * Check if the CPU ASID is available on the SMMU side. If a private context
+ * descriptor is using it, try to replace it.
+ */
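+/*
+ * Returns NULL if the ASID is now available for @mm's exclusive use, the
+ * existing shared context descriptor if all devices bound to @mm already
+ * use it, or an ERR_PTR if the private user of the ASID could not be
+ * moved to a new one.
+ */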
+static struct arm_smmu_ctx_desc *
+arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
+{
+ int ret;
+ u32 new_asid;
+ struct arm_smmu_ctx_desc *cd;
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_domain *smmu_domain;
+
+ cd = xa_load(&arm_smmu_asid_xa, asid);
+ if (!cd)
+ return NULL;
+
+ if (cd->mm) {
+ if (WARN_ON(cd->mm != mm))
+ return ERR_PTR(-EINVAL);
+ /* All devices bound to this mm use the same cd struct. */
+ refcount_inc(&cd->refs);
+ return cd;
+ }
+
+ smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+ smmu = smmu_domain->smmu;
+
+ ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(-ENOSPC);
+ /*
+ * Race with unmap: TLB invalidations will start targeting the new ASID,
+ * which isn't assigned yet. We'll do an invalidate-all on the old ASID
+ * later, so it doesn't matter.
+ */
+ cd->asid = new_asid;
+ /*
+ * Update ASID and invalidate CD in all associated masters. There will
+ * be some overlap between use of both ASIDs, until we invalidate the
+ * TLB.
+ */
+ arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
+
+ /* Invalidate TLB entries previously associated with that context */
+ arm_smmu_tlb_inv_asid(smmu, asid);
+
+ xa_erase(&arm_smmu_asid_xa, asid);
+ return NULL;
+}
+
+static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
+{
+ u16 asid;
+ int err = 0;
+ u64 tcr, par, reg;
+ struct arm_smmu_ctx_desc *cd;
+ struct arm_smmu_ctx_desc *ret = NULL;
+
+ /* Don't free the mm until we release the ASID */
+ mmgrab(mm);
+
+ asid = arm64_mm_context_get(mm);
+ if (!asid) {
+ err = -ESRCH;
+ goto out_drop_mm;
+ }
+
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd) {
+ err = -ENOMEM;
+ goto out_put_context;
+ }
+
+ refcount_set(&cd->refs, 1);
+
+ mutex_lock(&arm_smmu_asid_lock);
+ ret = arm_smmu_share_asid(mm, asid);
+ if (ret) {
+ mutex_unlock(&arm_smmu_asid_lock);
+ goto out_free_cd;
+ }
+
+ err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
+ mutex_unlock(&arm_smmu_asid_lock);
+
+ if (err)
+ goto out_free_asid;
+
+ tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
+ CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
+
+ switch (PAGE_SIZE) {
+ case SZ_4K:
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
+ break;
+ case SZ_16K:
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
+ break;
+ case SZ_64K:
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
+ break;
+ default:
+ WARN_ON(1);
+ err = -EINVAL;
+ goto out_free_asid;
+ }
+
+ reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
+
+ cd->ttbr = virt_to_phys(mm->pgd);
+ cd->tcr = tcr;
+ /*
+ * MAIR value is pretty much constant and global, so we can just get it
+ * from the current CPU register
+ */
+ cd->mair = read_sysreg(mair_el1);
+ cd->asid = asid;
+ cd->mm = mm;
+
+ return cd;
+
+out_free_asid:
+ arm_smmu_free_asid(cd);
+out_free_cd:
+ kfree(cd);
+out_put_context:
+ arm64_mm_context_put(mm);
+out_drop_mm:
+ mmdrop(mm);
+ return err < 0 ? ERR_PTR(err) : ret;
+}
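+
+/*
+ * As a concrete example, a kernel running with 48-bit VAs and 4KiB pages
+ * ends up with T0SZ == 64 - 48 == 16 and TG0 == ARM_LPAE_TCR_TG0_4K in
+ * the CD above, mirroring the CPU's own TCR_EL1 so that the SMMU can
+ * walk mm->pgd directly.
+ */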
+
+static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
+{
+ if (arm_smmu_free_asid(cd)) {
+ /* Unpin ASID */
+ arm64_mm_context_put(cd->mm);
+ mmdrop(cd->mm);
+ kfree(cd);
+ }
+}
+
+/*
+ * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h. On an
+ * SMMU without the range invalidation feature, issuing too many per-page
+ * TLBI commands into the command queue can trigger a soft lockup, so this
+ * threshold is used to replace such a run of per-page commands with a
+ * single address-space TLBI command.
+ */
+#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3))
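+
+/*
+ * For illustration: with 4KiB pages (PAGE_SHIFT == 12) this evaluates to
+ * 1 << 9 == 512 commands, so any invalidation of 2MiB or more below is
+ * collapsed into a single ASID-wide invalidation.
+ */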
+
+static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
+ struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+ size_t size;
+
+ /*
+ * mm_types defines vm_end as the first byte after the end of the range,
+ * whereas the IOMMU subsystem uses the last address of a range. Translate
+ * between the two conventions by simply working with the size of the
+ * range.
+ */
+ size = end - start;
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
+ if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
+ size = 0;
+ } else {
+ if (size == ULONG_MAX)
+ size = 0;
+ }
+
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
+ if (!size)
+ arm_smmu_tlb_inv_asid(smmu_domain->smmu,
+ smmu_mn->cd->asid);
+ else
+ arm_smmu_tlb_inv_range_asid(start, size,
+ smmu_mn->cd->asid,
+ PAGE_SIZE, false,
+ smmu_domain);
+ }
+
+ arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
+}
+
+static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
+ struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+
+ mutex_lock(&sva_lock);
+ if (smmu_mn->cleared) {
+ mutex_unlock(&sva_lock);
+ return;
+ }
+
+ /*
+ * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
+ * but disable translation.
+ */
+ arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
+
+ arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
+ arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
+
+ smmu_mn->cleared = true;
+ mutex_unlock(&sva_lock);
+}
+
+static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
+{
+ kfree(mn_to_smmu(mn));
+}
+
+static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
+ .arch_invalidate_secondary_tlbs = arm_smmu_mm_arch_invalidate_secondary_tlbs,
+ .release = arm_smmu_mm_release,
+ .free_notifier = arm_smmu_mmu_notifier_free,
+};
+
+/* Allocate or get existing MMU notifier for this {domain, mm} pair */
+static struct arm_smmu_mmu_notifier *
+arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
+ struct mm_struct *mm)
+{
+ int ret;
+ struct arm_smmu_ctx_desc *cd;
+ struct arm_smmu_mmu_notifier *smmu_mn;
+
+ list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
+ if (smmu_mn->mn.mm == mm) {
+ refcount_inc(&smmu_mn->refs);
+ return smmu_mn;
+ }
+ }
+
+ cd = arm_smmu_alloc_shared_cd(mm);
+ if (IS_ERR(cd))
+ return ERR_CAST(cd);
+
+ smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
+ if (!smmu_mn) {
+ ret = -ENOMEM;
+ goto err_free_cd;
+ }
+
+ refcount_set(&smmu_mn->refs, 1);
+ smmu_mn->cd = cd;
+ smmu_mn->domain = smmu_domain;
+ smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;
+
+ ret = mmu_notifier_register(&smmu_mn->mn, mm);
+ if (ret) {
+ kfree(smmu_mn);
+ goto err_free_cd;
+ }
+
+ ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
+ if (ret)
+ goto err_put_notifier;
+
+ list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
+ return smmu_mn;
+
+err_put_notifier:
+ /* Frees smmu_mn */
+ mmu_notifier_put(&smmu_mn->mn);
+err_free_cd:
+ arm_smmu_free_shared_cd(cd);
+ return ERR_PTR(ret);
+}
+
+static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
+{
+ struct mm_struct *mm = smmu_mn->mn.mm;
+ struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
+ struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+
+ if (!refcount_dec_and_test(&smmu_mn->refs))
+ return;
+
+ list_del(&smmu_mn->list);
+ arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
+
+ /*
+ * If we went through clear(), we've already invalidated, and no
+ * new TLB entry can have been formed.
+ */
+ if (!smmu_mn->cleared) {
+ arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
+ arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
+ }
+
+ /* Frees smmu_mn */
+ mmu_notifier_put(&smmu_mn->mn);
+ arm_smmu_free_shared_cd(cd);
+}
+
+static struct iommu_sva *
+__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+{
+ int ret;
+ struct arm_smmu_bond *bond;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (!master || !master->sva_enabled)
+ return ERR_PTR(-ENODEV);
+
+ /* If bind() was already called for this {dev, mm} pair, reuse it. */
+ list_for_each_entry(bond, &master->bonds, list) {
+ if (bond->mm == mm) {
+ refcount_inc(&bond->refs);
+ return &bond->sva;
+ }
+ }
+
+ bond = kzalloc(sizeof(*bond), GFP_KERNEL);
+ if (!bond)
+ return ERR_PTR(-ENOMEM);
+
+ bond->mm = mm;
+ bond->sva.dev = dev;
+ refcount_set(&bond->refs, 1);
+
+ bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
+ if (IS_ERR(bond->smmu_mn)) {
+ ret = PTR_ERR(bond->smmu_mn);
+ goto err_free_bond;
+ }
+
+ list_add(&bond->list, &master->bonds);
+ return &bond->sva;
+
+err_free_bond:
+ kfree(bond);
+ return ERR_PTR(ret);
+}
+
+bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
+{
+ unsigned long reg, fld;
+ unsigned long oas;
+ unsigned long asid_bits;
+ u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;
+
+ if (vabits_actual == 52)
+ feat_mask |= ARM_SMMU_FEAT_VAX;
+
+ if ((smmu->features & feat_mask) != feat_mask)
+ return false;
+
+ if (!(smmu->pgsize_bitmap & PAGE_SIZE))
+ return false;
+
+ /*
+ * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
+ * not even pretending to support AArch32 here. Abort if the MMU outputs
+ * addresses larger than what we support.
+ */
+ reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ oas = id_aa64mmfr0_parange_to_phys_shift(fld);
+ if (smmu->oas < oas)
+ return false;
+
+ /* We can support bigger ASIDs than the CPU, but not smaller */
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
+ asid_bits = fld ? 16 : 8;
+ if (smmu->asid_bits < asid_bits)
+ return false;
+
+ /*
+ * See max_pinned_asids in arch/arm64/mm/context.c. The following is
+ * generally the maximum number of bindable processes.
+ */
+ if (arm64_kernel_unmapped_at_el0())
+ asid_bits--;
+ dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
+ num_possible_cpus() - 2);
+
+ return true;
+}
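+
+/*
+ * Worked example: with 16-bit ASIDs and kpti enabled, asid_bits drops to
+ * 15 above, so a 64-CPU system reports (1 << 15) - 64 - 2 == 32702
+ * shared contexts.
+ */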
+
+bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
+{
+ /* We're not keeping track of SIDs in fault events */
+ if (master->num_streams != 1)
+ return false;
+
+ return master->stall_enabled;
+}
+
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return false;
+
+ /* SSID support is mandatory for the moment */
+ return master->ssid_bits;
+}
+
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+ bool enabled;
+
+ mutex_lock(&sva_lock);
+ enabled = master->sva_enabled;
+ mutex_unlock(&sva_lock);
+ return enabled;
+}
+
+static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
+{
+ int ret;
+ struct device *dev = master->dev;
+
+ /*
+ * Drivers for devices supporting PRI or stall should enable IOPF first.
+ * Others have device-specific fault handlers and don't need IOPF.
+ */
+ if (!arm_smmu_master_iopf_supported(master))
+ return 0;
+
+ if (!master->iopf_enabled)
+ return -EINVAL;
+
+ ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
+ if (ret)
+ return ret;
+
+ ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ if (ret) {
+ iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ return ret;
+ }
+ return 0;
+}
+
+static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
+{
+ struct device *dev = master->dev;
+
+ if (!master->iopf_enabled)
+ return;
+
+ iommu_unregister_device_fault_handler(dev);
+ iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+}
+
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+ int ret;
+
+ mutex_lock(&sva_lock);
+ ret = arm_smmu_master_sva_enable_iopf(master);
+ if (!ret)
+ master->sva_enabled = true;
+ mutex_unlock(&sva_lock);
+
+ return ret;
+}
+
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+ mutex_lock(&sva_lock);
+ if (!list_empty(&master->bonds)) {
+ dev_err(master->dev, "cannot disable SVA, device is bound\n");
+ mutex_unlock(&sva_lock);
+ return -EBUSY;
+ }
+ arm_smmu_master_sva_disable_iopf(master);
+ master->sva_enabled = false;
+ mutex_unlock(&sva_lock);
+
+ return 0;
+}
+
+void arm_smmu_sva_notifier_synchronize(void)
+{
+ /*
+ * Some MMU notifiers may still be waiting to be freed, using
+ * arm_smmu_mmu_notifier_free(). Wait for them.
+ */
+ mmu_notifier_synchronize();
+}
+
+void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t id)
+{
+ struct mm_struct *mm = domain->mm;
+ struct arm_smmu_bond *bond = NULL, *t;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ mutex_lock(&sva_lock);
+ list_for_each_entry(t, &master->bonds, list) {
+ if (t->mm == mm) {
+ bond = t;
+ break;
+ }
+ }
+
+ if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+ list_del(&bond->list);
+ arm_smmu_mmu_notifier_put(bond->smmu_mn);
+ kfree(bond);
+ }
+ mutex_unlock(&sva_lock);
+}
+
+static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t id)
+{
+ int ret = 0;
+ struct iommu_sva *handle;
+ struct mm_struct *mm = domain->mm;
+
+ mutex_lock(&sva_lock);
+ handle = __arm_smmu_sva_bind(dev, mm);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ mutex_unlock(&sva_lock);
+
+ return ret;
+}
+
+static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
+{
+ kfree(domain);
+}
+
+static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
+ .set_dev_pasid = arm_smmu_sva_set_dev_pasid,
+ .free = arm_smmu_sva_domain_free
+};
+
+struct iommu_domain *arm_smmu_sva_domain_alloc(void)
+{
+ struct iommu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+ domain->ops = &arm_smmu_sva_domain_ops;
+
+ return domain;
+}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
new file mode 100644
index 0000000000..bd0a596f98
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -0,0 +1,3953 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOMMU API for ARM architected SMMUv3 implementations.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver is powered by bad coffee and bombay mix.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/bitops.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io-pgtable.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-ats.h>
+#include <linux/platform_device.h>
+
+#include "arm-smmu-v3.h"
+#include "../../dma-iommu.h"
+#include "../../iommu-sva.h"
+
+static bool disable_bypass = true;
+module_param(disable_bypass, bool, 0444);
+MODULE_PARM_DESC(disable_bypass,
+ "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
+
+static bool disable_msipolling;
+module_param(disable_msipolling, bool, 0444);
+MODULE_PARM_DESC(disable_msipolling,
+ "Disable MSI-based polling for CMD_SYNC completion.");
+
+enum arm_smmu_msi_index {
+ EVTQ_MSI_INDEX,
+ GERROR_MSI_INDEX,
+ PRIQ_MSI_INDEX,
+ ARM_SMMU_MAX_MSIS,
+};
+
+static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
+ [EVTQ_MSI_INDEX] = {
+ ARM_SMMU_EVTQ_IRQ_CFG0,
+ ARM_SMMU_EVTQ_IRQ_CFG1,
+ ARM_SMMU_EVTQ_IRQ_CFG2,
+ },
+ [GERROR_MSI_INDEX] = {
+ ARM_SMMU_GERROR_IRQ_CFG0,
+ ARM_SMMU_GERROR_IRQ_CFG1,
+ ARM_SMMU_GERROR_IRQ_CFG2,
+ },
+ [PRIQ_MSI_INDEX] = {
+ ARM_SMMU_PRIQ_IRQ_CFG0,
+ ARM_SMMU_PRIQ_IRQ_CFG1,
+ ARM_SMMU_PRIQ_IRQ_CFG2,
+ },
+};
+
+struct arm_smmu_option_prop {
+ u32 opt;
+ const char *prop;
+};
+
+DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
+DEFINE_MUTEX(arm_smmu_asid_lock);
+
+/*
+ * Special value used by SVA when a process dies, to quiesce a CD without
+ * disabling it.
+ */
+struct arm_smmu_ctx_desc quiet_cd = { 0 };
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+ { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
+ { 0, NULL},
+};
+
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+ int i = 0;
+
+ do {
+ if (of_property_read_bool(smmu->dev->of_node,
+ arm_smmu_options[i].prop)) {
+ smmu->options |= arm_smmu_options[i].opt;
+ dev_notice(smmu->dev, "option %s\n",
+ arm_smmu_options[i].prop);
+ }
+ } while (arm_smmu_options[++i].opt);
+}
+
+/* Low-level queue manipulation functions */
+static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
+{
+ u32 space, prod, cons;
+
+ prod = Q_IDX(q, q->prod);
+ cons = Q_IDX(q, q->cons);
+
+ if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
+ space = (1 << q->max_n_shift) - (prod - cons);
+ else
+ space = cons - prod;
+
+ return space >= n;
+}
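+
+/*
+ * Worked example, assuming an 8-entry queue (max_n_shift == 3): with
+ * prod == 0x9 (wrap bit set, index 1) and cons == 0x7 (wrap bit clear,
+ * index 7), the wrap bits differ, so space = 7 - 1 = 6 free entries.
+ */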
+
+static bool queue_full(struct arm_smmu_ll_queue *q)
+{
+ return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
+ Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
+}
+
+static bool queue_empty(struct arm_smmu_ll_queue *q)
+{
+ return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
+ Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
+}
+
+static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
+{
+ return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
+ (Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
+ ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
+ (Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
+}
+
+static void queue_sync_cons_out(struct arm_smmu_queue *q)
+{
+ /*
+ * Ensure that all CPU accesses (reads and writes) to the queue
+ * are complete before we update the cons pointer.
+ */
+ __iomb();
+ writel_relaxed(q->llq.cons, q->cons_reg);
+}
+
+static void queue_inc_cons(struct arm_smmu_ll_queue *q)
+{
+ u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
+ q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
+}
+
+static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
+{
+ struct arm_smmu_ll_queue *llq = &q->llq;
+
+ if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
+ return;
+
+ llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+ Q_IDX(llq, llq->cons);
+ queue_sync_cons_out(q);
+}
+
+static int queue_sync_prod_in(struct arm_smmu_queue *q)
+{
+ u32 prod;
+ int ret = 0;
+
+ /*
+ * We can't use the _relaxed() variant here, as we must prevent
+ * speculative reads of the queue before we have determined that
+ * prod has indeed moved.
+ */
+ prod = readl(q->prod_reg);
+
+ if (Q_OVF(prod) != Q_OVF(q->llq.prod))
+ ret = -EOVERFLOW;
+
+ q->llq.prod = prod;
+ return ret;
+}
+
+static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
+{
+ u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
+ return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
+}
+
+static void queue_poll_init(struct arm_smmu_device *smmu,
+ struct arm_smmu_queue_poll *qp)
+{
+ qp->delay = 1;
+ qp->spin_cnt = 0;
+ qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
+}
+
+static int queue_poll(struct arm_smmu_queue_poll *qp)
+{
+ if (ktime_compare(ktime_get(), qp->timeout) > 0)
+ return -ETIMEDOUT;
+
+ if (qp->wfe) {
+ wfe();
+ } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) {
+ cpu_relax();
+ } else {
+ udelay(qp->delay);
+ qp->delay *= 2;
+ qp->spin_cnt = 0;
+ }
+
+ return 0;
+}
+
+static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
+{
+ int i;
+
+ for (i = 0; i < n_dwords; ++i)
+ *dst++ = cpu_to_le64(*src++);
+}
+
+static void queue_read(u64 *dst, __le64 *src, size_t n_dwords)
+{
+ int i;
+
+ for (i = 0; i < n_dwords; ++i)
+ *dst++ = le64_to_cpu(*src++);
+}
+
+static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
+{
+ if (queue_empty(&q->llq))
+ return -EAGAIN;
+
+ queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
+ queue_inc_cons(&q->llq);
+ queue_sync_cons_out(q);
+ return 0;
+}
+
+/* High-level queue accessors */
+static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
+{
+ memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
+ cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
+
+ switch (ent->opcode) {
+ case CMDQ_OP_TLBI_EL2_ALL:
+ case CMDQ_OP_TLBI_NSNH_ALL:
+ break;
+ case CMDQ_OP_PREFETCH_CFG:
+ cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
+ break;
+ case CMDQ_OP_CFGI_CD:
+ cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
+ fallthrough;
+ case CMDQ_OP_CFGI_STE:
+ cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
+ break;
+ case CMDQ_OP_CFGI_CD_ALL:
+ cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
+ break;
+ case CMDQ_OP_CFGI_ALL:
+ /* Cover the entire SID range */
+ cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
+ break;
+ case CMDQ_OP_TLBI_NH_VA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
+ fallthrough;
+ case CMDQ_OP_TLBI_EL2_VA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
+ cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+ break;
+ case CMDQ_OP_TLBI_S2_IPA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
+ cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
+ break;
+ case CMDQ_OP_TLBI_NH_ASID:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
+ fallthrough;
+ case CMDQ_OP_TLBI_S12_VMALL:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
+ break;
+ case CMDQ_OP_TLBI_EL2_ASID:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
+ break;
+ case CMDQ_OP_ATC_INV:
+ cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
+ cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
+ cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
+ break;
+ case CMDQ_OP_PRI_RESP:
+ cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
+ cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
+ cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
+ switch (ent->pri.resp) {
+ case PRI_RESP_DENY:
+ case PRI_RESP_FAIL:
+ case PRI_RESP_SUCC:
+ break;
+ default:
+ return -EINVAL;
+ }
+ cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
+ break;
+ case CMDQ_OP_RESUME:
+ cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid);
+ cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_RESP, ent->resume.resp);
+ cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag);
+ break;
+ case CMDQ_OP_CMD_SYNC:
+ if (ent->sync.msiaddr) {
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
+ cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
+ } else {
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
+ }
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
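+
+/*
+ * A minimal sketch of driving the builder, mirroring what
+ * __arm_smmu_cmdq_issue_cmd() does further down:
+ *
+ *	u64 cmd[CMDQ_ENT_DWORDS];
+ *	struct arm_smmu_cmdq_ent ent = {
+ *		.opcode    = CMDQ_OP_TLBI_NH_ASID,
+ *		.tlbi.asid = asid,
+ *	};
+ *
+ *	if (!arm_smmu_cmdq_build_cmd(cmd, &ent))
+ *		arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, true);
+ */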
+
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+{
+ return &smmu->cmdq;
+}
+
+static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
+ struct arm_smmu_queue *q, u32 prod)
+{
+ struct arm_smmu_cmdq_ent ent = {
+ .opcode = CMDQ_OP_CMD_SYNC,
+ };
+
+ /*
+ * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
+ * payload, so the write will zero the entire command on that platform.
+ */
+ if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
+ ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
+ q->ent_dwords * 8;
+ }
+
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+}
+
+static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
+ struct arm_smmu_queue *q)
+{
+ static const char * const cerror_str[] = {
+ [CMDQ_ERR_CERROR_NONE_IDX] = "No error",
+ [CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
+ [CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
+ [CMDQ_ERR_CERROR_ATC_INV_IDX] = "ATC invalidate timeout",
+ };
+
+ int i;
+ u64 cmd[CMDQ_ENT_DWORDS];
+ u32 cons = readl_relaxed(q->cons_reg);
+ u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
+ struct arm_smmu_cmdq_ent cmd_sync = {
+ .opcode = CMDQ_OP_CMD_SYNC,
+ };
+
+ dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
+ idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");
+
+ switch (idx) {
+ case CMDQ_ERR_CERROR_ABT_IDX:
+ dev_err(smmu->dev, "retrying command fetch\n");
+ return;
+ case CMDQ_ERR_CERROR_NONE_IDX:
+ return;
+ case CMDQ_ERR_CERROR_ATC_INV_IDX:
+ /*
+ * ATC Invalidation Completion timeout. CONS is still pointing
+ * at the CMD_SYNC. Attempt to complete other pending commands
+ * by repeating the CMD_SYNC, though we might well end up back
+ * here since the ATC invalidation may still be pending.
+ */
+ return;
+ case CMDQ_ERR_CERROR_ILL_IDX:
+ default:
+ break;
+ }
+
+ /*
+ * We may have concurrent producers, so we need to be careful
+ * not to touch any of the shadow cmdq state.
+ */
+ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
+ dev_err(smmu->dev, "skipping command in error state:\n");
+ for (i = 0; i < ARRAY_SIZE(cmd); ++i)
+ dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
+
+ /* Convert the erroneous command into a CMD_SYNC */
+ arm_smmu_cmdq_build_cmd(cmd, &cmd_sync);
+
+ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
+}
+
+static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
+{
+ __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
+}
+
+/*
+ * Command queue locking.
+ * This is a form of bastardised rwlock with the following major changes:
+ *
+ * - The only LOCK routines are exclusive_trylock() and shared_lock().
+ * Neither have barrier semantics, and instead provide only a control
+ * dependency.
+ *
+ * - The UNLOCK routines are supplemented with shared_tryunlock(), which
+ * fails if the caller appears to be the last lock holder (yes, this is
+ * racy). All successful UNLOCK routines have RELEASE semantics.
+ */
+static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
+{
+ int val;
+
+ /*
+ * We can try to avoid the cmpxchg() loop by simply incrementing the
+ * lock counter. When held in exclusive state, the lock counter is set
+ * to INT_MIN so these increments won't hurt as the value will remain
+ * negative.
+ */
+ if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
+ return;
+
+ do {
+ val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
+ } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
+}
+
+static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
+{
+ (void)atomic_dec_return_release(&cmdq->lock);
+}
+
+static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
+{
+ if (atomic_read(&cmdq->lock) == 1)
+ return false;
+
+ arm_smmu_cmdq_shared_unlock(cmdq);
+ return true;
+}
+
+#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \
+({ \
+ bool __ret; \
+ local_irq_save(flags); \
+ __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
+ if (!__ret) \
+ local_irq_restore(flags); \
+ __ret; \
+})
+
+#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \
+({ \
+ atomic_set_release(&cmdq->lock, 0); \
+ local_irq_restore(flags); \
+})
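+
+/*
+ * A short usage sketch, exactly the pattern used by
+ * arm_smmu_cmdq_poll_until_not_full() below: try to refresh the shadow
+ * cons under the exclusive lock, otherwise spin until another CPU does
+ * it for us:
+ *
+ *	if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
+ *		WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
+ *		arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
+ *	}
+ */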
+
+
+/*
+ * Command queue insertion.
+ * This is made fiddly by our attempts to achieve some sort of scalability
+ * since there is one queue shared amongst all of the CPUs in the system. If
+ * you like mixed-size concurrency, dependency ordering and relaxed atomics,
+ * then you'll *love* this monstrosity.
+ *
+ * The basic idea is to split the queue up into ranges of commands that are
+ * owned by a given CPU; the owner may not have written all of the commands
+ * itself, but is responsible for advancing the hardware prod pointer when
+ * the time comes. The algorithm is roughly:
+ *
+ * 1. Allocate some space in the queue. At this point we also discover
+ * whether the head of the queue is currently owned by another CPU,
+ * or whether we are the owner.
+ *
+ * 2. Write our commands into our allocated slots in the queue.
+ *
+ * 3. Mark our slots as valid in arm_smmu_cmdq.valid_map.
+ *
+ * 4. If we are an owner:
+ * a. Wait for the previous owner to finish.
+ * b. Mark the queue head as unowned, which tells us the range
+ * that we are responsible for publishing.
+ * c. Wait for all commands in our owned range to become valid.
+ * d. Advance the hardware prod pointer.
+ * e. Tell the next owner we've finished.
+ *
+ * 5. If we are inserting a CMD_SYNC (we may or may not have been an
+ * owner), then we need to stick around until it has completed:
+ * a. If we have MSIs, the SMMU can write back into the CMD_SYNC
+ * to clear the first 4 bytes.
+ * b. Otherwise, we spin waiting for the hardware cons pointer to
+ * advance past our command.
+ *
+ * The devil is in the details, particularly the use of locking for handling
+ * SYNC completion and freeing up space in the queue before we think that it is
+ * full.
+ */
+static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
+ u32 sprod, u32 eprod, bool set)
+{
+ u32 swidx, sbidx, ewidx, ebidx;
+ struct arm_smmu_ll_queue llq = {
+ .max_n_shift = cmdq->q.llq.max_n_shift,
+ .prod = sprod,
+ };
+
+ ewidx = BIT_WORD(Q_IDX(&llq, eprod));
+ ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;
+
+ while (llq.prod != eprod) {
+ unsigned long mask;
+ atomic_long_t *ptr;
+ u32 limit = BITS_PER_LONG;
+
+ swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
+ sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
+
+ ptr = &cmdq->valid_map[swidx];
+
+ if ((swidx == ewidx) && (sbidx < ebidx))
+ limit = ebidx;
+
+ mask = GENMASK(limit - 1, sbidx);
+
+ /*
+ * The valid bit is the inverse of the wrap bit. This means
+ * that a zero-initialised queue is invalid and, after marking
+ * all entries as valid, they become invalid again when we
+ * wrap.
+ */
+ if (set) {
+ atomic_long_xor(mask, ptr);
+ } else { /* Poll */
+ unsigned long valid;
+
+ valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
+ atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid);
+ }
+
+ llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
+ }
+}
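+
+/*
+ * Concretely: on the first lap (prod wrap bit clear), set() XORs the
+ * zero-initialised bits to 1 and pollers wait for (VAL & mask) == mask;
+ * on the next lap the wrap bit is set, the same XOR flips the bits back
+ * to 0, and pollers wait for 0 instead.
+ */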
+
+/* Mark all entries in the range [sprod, eprod) as valid */
+static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
+ u32 sprod, u32 eprod)
+{
+ __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
+}
+
+/* Wait for all entries in the range [sprod, eprod) to become valid */
+static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
+ u32 sprod, u32 eprod)
+{
+ __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
+}
+
+/* Wait for the command queue to become non-full */
+static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
+{
+ unsigned long flags;
+ struct arm_smmu_queue_poll qp;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
+ int ret = 0;
+
+ /*
+ * Try to update our copy of cons by grabbing exclusive cmdq access. If
+ * that fails, spin until somebody else updates it for us.
+ */
+ if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
+ WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
+ arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
+ llq->val = READ_ONCE(cmdq->q.llq.val);
+ return 0;
+ }
+
+ queue_poll_init(smmu, &qp);
+ do {
+ llq->val = READ_ONCE(cmdq->q.llq.val);
+ if (!queue_full(llq))
+ break;
+
+ ret = queue_poll(&qp);
+ } while (!ret);
+
+ return ret;
+}
+
+/*
+ * Wait until the SMMU signals a CMD_SYNC completion MSI.
+ * Must be called with the cmdq lock held in some capacity.
+ */
+static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
+{
+ int ret = 0;
+ struct arm_smmu_queue_poll qp;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
+ u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
+
+ queue_poll_init(smmu, &qp);
+
+ /*
+ * The MSI won't generate an event, since it's being written back
+ * into the command queue.
+ */
+ qp.wfe = false;
+ smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp)));
+ llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
+ return ret;
+}
+
+/*
+ * Wait until the SMMU cons index passes llq->prod.
+ * Must be called with the cmdq lock held in some capacity.
+ */
+static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
+{
+ struct arm_smmu_queue_poll qp;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
+ u32 prod = llq->prod;
+ int ret = 0;
+
+ queue_poll_init(smmu, &qp);
+ llq->val = READ_ONCE(cmdq->q.llq.val);
+ do {
+ if (queue_consumed(llq, prod))
+ break;
+
+ ret = queue_poll(&qp);
+
+ /*
+ * This needs to be a readl() so that our subsequent call
+ * to arm_smmu_cmdq_shared_tryunlock() can fail accurately.
+ *
+ * Specifically, we need to ensure that we observe all
+ * shared_lock()s by other CMD_SYNCs that share our owner,
+ * so that a failing call to tryunlock() means that we're
+ * the last one out and therefore we can safely advance
+ * cmdq->q.llq.cons. Roughly speaking:
+ *
+ * CPU 0 CPU1 CPU2 (us)
+ *
+ * if (sync)
+ * shared_lock();
+ *
+ * dma_wmb();
+ * set_valid_map();
+ *
+ * if (owner) {
+ * poll_valid_map();
+ * <control dependency>
+ * writel(prod_reg);
+ *
+ * readl(cons_reg);
+ * tryunlock();
+ *
+ * Requires us to see CPU 0's shared_lock() acquisition.
+ */
+ llq->cons = readl(cmdq->q.cons_reg);
+ } while (!ret);
+
+ return ret;
+}
+
+static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
+{
+ if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
+ return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+
+ return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+}
+
+static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
+ u32 prod, int n)
+{
+ int i;
+ struct arm_smmu_ll_queue llq = {
+ .max_n_shift = cmdq->q.llq.max_n_shift,
+ .prod = prod,
+ };
+
+ for (i = 0; i < n; ++i) {
+ u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS];
+
+ prod = queue_inc_prod_n(&llq, i);
+ queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
+ }
+}
+
+/*
+ * This is the actual insertion function, and provides the following
+ * ordering guarantees to callers:
+ *
+ * - There is a dma_wmb() before publishing any commands to the queue.
+ * This can be relied upon to order prior writes to data structures
+ * in memory (such as a CD or an STE) before the command.
+ *
+ * - On completion of a CMD_SYNC, there is a control dependency.
+ * This can be relied upon to order subsequent writes to memory (e.g.
+ * freeing an IOVA) after completion of the CMD_SYNC.
+ *
+ * - Command insertion is totally ordered, so if two CPUs each race to
+ * insert their own list of commands then all of the commands from one
+ * CPU will appear before any of the commands from the other CPU.
+ */
+static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+ u64 *cmds, int n, bool sync)
+{
+ u64 cmd_sync[CMDQ_ENT_DWORDS];
+ u32 prod;
+ unsigned long flags;
+ bool owner;
+ struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
+ struct arm_smmu_ll_queue llq, head;
+ int ret = 0;
+
+ llq.max_n_shift = cmdq->q.llq.max_n_shift;
+
+ /* 1. Allocate some space in the queue */
+ local_irq_save(flags);
+ llq.val = READ_ONCE(cmdq->q.llq.val);
+ do {
+ u64 old;
+
+ while (!queue_has_space(&llq, n + sync)) {
+ local_irq_restore(flags);
+ if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+ dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ local_irq_save(flags);
+ }
+
+ head.cons = llq.cons;
+ head.prod = queue_inc_prod_n(&llq, n + sync) |
+ CMDQ_PROD_OWNED_FLAG;
+
+ old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
+ if (old == llq.val)
+ break;
+
+ llq.val = old;
+ } while (1);
+ owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
+ head.prod &= ~CMDQ_PROD_OWNED_FLAG;
+ llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
+
+ /*
+ * 2. Write our commands into the queue
+ * Dependency ordering from the cmpxchg() loop above.
+ */
+ arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
+ if (sync) {
+ prod = queue_inc_prod_n(&llq, n);
+ arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
+ queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
+
+ /*
+ * In order to determine completion of our CMD_SYNC, we must
+ * ensure that the queue can't wrap twice without us noticing.
+ * We achieve that by taking the cmdq lock as shared before
+ * marking our slot as valid.
+ */
+ arm_smmu_cmdq_shared_lock(cmdq);
+ }
+
+ /* 3. Mark our slots as valid, ensuring commands are visible first */
+ dma_wmb();
+ arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
+
+ /* 4. If we are the owner, take control of the SMMU hardware */
+ if (owner) {
+ /* a. Wait for previous owner to finish */
+ atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
+
+ /* b. Stop gathering work by clearing the owned flag */
+ prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
+ &cmdq->q.llq.atomic.prod);
+ prod &= ~CMDQ_PROD_OWNED_FLAG;
+
+ /*
+ * c. Wait for any gathered work to be written to the queue.
+ * Note that we read our own entries so that we have the control
+ * dependency required by (d).
+ */
+ arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
+
+ /*
+ * d. Advance the hardware prod pointer
+ * Control dependency ordering from the entries becoming valid.
+ */
+ writel_relaxed(prod, cmdq->q.prod_reg);
+
+ /*
+ * e. Tell the next owner we're done
+ * Make sure we've updated the hardware first, so that we don't
+ * race to update prod and potentially move it backwards.
+ */
+ atomic_set_release(&cmdq->owner_prod, prod);
+ }
+
+ /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
+ if (sync) {
+ llq.prod = queue_inc_prod_n(&llq, n);
+ ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+ if (ret) {
+ dev_err_ratelimited(smmu->dev,
+ "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
+ llq.prod,
+ readl_relaxed(cmdq->q.prod_reg),
+ readl_relaxed(cmdq->q.cons_reg));
+ }
+
+ /*
+ * Try to unlock the cmdq lock. This will fail if we're the last
+ * reader, in which case we can safely update cmdq->q.llq.cons
+ */
+ if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
+ WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
+ arm_smmu_cmdq_shared_unlock(cmdq);
+ }
+ }
+
+ local_irq_restore(flags);
+ return ret;
+}
+
+static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent,
+ bool sync)
+{
+ u64 cmd[CMDQ_ENT_DWORDS];
+
+ if (unlikely(arm_smmu_cmdq_build_cmd(cmd, ent))) {
+ dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
+ ent->opcode);
+ return -EINVAL;
+ }
+
+ return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
+}
+
+static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
+{
+ return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
+}
+
+static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
+{
+ return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
+}
+
+static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ int index;
+
+ if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
+ (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+ cmds->num = 0;
+ }
+
+ if (cmds->num == CMDQ_BATCH_ENTRIES) {
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
+ cmds->num = 0;
+ }
+
+ index = cmds->num * CMDQ_ENT_DWORDS;
+ if (unlikely(arm_smmu_cmdq_build_cmd(&cmds->cmds[index], cmd))) {
+ dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
+ cmd->opcode);
+ return;
+ }
+
+ cmds->num++;
+}
+
+static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds)
+{
+ return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+}
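+
+/*
+ * A minimal batch usage sketch, as in arm_smmu_sync_cd() below: queue up
+ * entries with _add() and finish with the CMD_SYNC implied by _submit():
+ *
+ *	struct arm_smmu_cmdq_batch cmds;
+ *	struct arm_smmu_cmdq_ent cmd = {
+ *		.opcode = CMDQ_OP_CFGI_CD,
+ *		.cfgi = { .ssid = ssid, .leaf = true },
+ *	};
+ *
+ *	cmds.num = 0;
+ *	cmd.cfgi.sid = sid;
+ *	arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+ *	arm_smmu_cmdq_batch_submit(smmu, &cmds);
+ */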
+
+static int arm_smmu_page_response(struct device *dev,
+ struct iommu_fault_event *unused,
+ struct iommu_page_response *resp)
+{
+ struct arm_smmu_cmdq_ent cmd = {0};
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ int sid = master->streams[0].id;
+
+ if (master->stall_enabled) {
+ cmd.opcode = CMDQ_OP_RESUME;
+ cmd.resume.sid = sid;
+ cmd.resume.stag = resp->grpid;
+ switch (resp->code) {
+ case IOMMU_PAGE_RESP_INVALID:
+ case IOMMU_PAGE_RESP_FAILURE:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
+ break;
+ case IOMMU_PAGE_RESP_SUCCESS:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
+ /*
+ * Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP.
+ * RESUME consumption guarantees that the stalled transaction will be
+ * terminated... at some point in the future. PRI_RESP is fire and
+ * forget.
+ */
+
+ return 0;
+}
+
+/* Context descriptor manipulation functions */
+void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID,
+ .tlbi.asid = asid,
+ };
+
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+}
+
+static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+ int ssid, bool leaf)
+{
+ size_t i;
+ unsigned long flags;
+ struct arm_smmu_master *master;
+ struct arm_smmu_cmdq_batch cmds;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_CFGI_CD,
+ .cfgi = {
+ .ssid = ssid,
+ .leaf = leaf,
+ },
+ };
+
+ cmds.num = 0;
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.cfgi.sid = master->streams[i].id;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+ }
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_cmdq_batch_submit(smmu, &cmds);
+}
+
+static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
+ struct arm_smmu_l1_ctx_desc *l1_desc)
+{
+ size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+
+ l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
+ &l1_desc->l2ptr_dma, GFP_KERNEL);
+ if (!l1_desc->l2ptr) {
+ dev_warn(smmu->dev,
+ "failed to allocate context descriptor table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void arm_smmu_write_cd_l1_desc(__le64 *dst,
+ struct arm_smmu_l1_ctx_desc *l1_desc)
+{
+ u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
+ CTXDESC_L1_DESC_V;
+
+ /* See comment in arm_smmu_write_ctx_desc() */
+ WRITE_ONCE(*dst, cpu_to_le64(val));
+}
+
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
+ u32 ssid)
+{
+ __le64 *l1ptr;
+ unsigned int idx;
+ struct arm_smmu_l1_ctx_desc *l1_desc;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+ if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+ return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+
+ idx = ssid >> CTXDESC_SPLIT;
+ l1_desc = &cdcfg->l1_desc[idx];
+ if (!l1_desc->l2ptr) {
+ if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ return NULL;
+
+ l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+ arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+ /* An invalid L1CD can be cached */
+ arm_smmu_sync_cd(smmu_domain, ssid, false);
+ }
+ idx = ssid & (CTXDESC_L2_ENTRIES - 1);
+ return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+}
+
+int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+ struct arm_smmu_ctx_desc *cd)
+{
+ /*
+ * This function handles the following cases:
+ *
+ * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
+ * (2) Install a secondary CD, for SID+SSID traffic.
+ * (3) Update ASID of a CD. Atomically write the first 64 bits of the
+ * CD, then invalidate the old entry and mappings.
+ * (4) Quiesce the context without clearing the valid bit. Disable
+ * translation, and ignore any translation fault.
+ * (5) Remove a secondary CD.
+ */
+ u64 val;
+ bool cd_live;
+ __le64 *cdptr;
+
+ if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+ return -E2BIG;
+
+ cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+ if (!cdptr)
+ return -ENOMEM;
+
+ val = le64_to_cpu(cdptr[0]);
+ cd_live = !!(val & CTXDESC_CD_0_V);
+
+ if (!cd) { /* (5) */
+ val = 0;
+ } else if (cd == &quiet_cd) { /* (4) */
+ val |= CTXDESC_CD_0_TCR_EPD0;
+ } else if (cd_live) { /* (3) */
+ val &= ~CTXDESC_CD_0_ASID;
+ val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
+ /*
+ * Until CD+TLB invalidation, both ASIDs may be used for tagging
+ * this substream's traffic
+ */
+ } else { /* (1) and (2) */
+ cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+ cdptr[2] = 0;
+ cdptr[3] = cpu_to_le64(cd->mair);
+
+ /*
+ * STE is live, and the SMMU might read dwords of this CD in any
+ * order. Ensure that it observes valid values before reading
+ * V=1.
+ */
+ arm_smmu_sync_cd(smmu_domain, ssid, true);
+
+ val = cd->tcr |
+#ifdef __BIG_ENDIAN
+ CTXDESC_CD_0_ENDI |
+#endif
+ CTXDESC_CD_0_R | CTXDESC_CD_0_A |
+ (cd->mm ? 0 : CTXDESC_CD_0_ASET) |
+ CTXDESC_CD_0_AA64 |
+ FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
+ CTXDESC_CD_0_V;
+
+ if (smmu_domain->stall_enabled)
+ val |= CTXDESC_CD_0_S;
+ }
+
+ /*
+ * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
+ * "Configuration structures and configuration invalidation completion"
+ *
+ * The size of single-copy atomic reads made by the SMMU is
+ * IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
+ * field within an aligned 64-bit span of a structure can be altered
+ * without first making the structure invalid.
+ */
+ WRITE_ONCE(cdptr[0], cpu_to_le64(val));
+ arm_smmu_sync_cd(smmu_domain, ssid, true);
+ return 0;
+}
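+
+/*
+ * The callers exercise the cases above: arm_smmu_share_asid() performs an
+ * ASID update (3) on the IOMMU_NO_PASID entry, while the SVA mm release
+ * path writes &quiet_cd (4) to quiesce a dying mm without tearing the CD
+ * down.
+ */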
+
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+ int ret;
+ size_t l1size;
+ size_t max_contexts;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+ struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
+
+ max_contexts = 1 << cfg->s1cdmax;
+
+ if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
+ max_contexts <= CTXDESC_L2_ENTRIES) {
+ cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+ cdcfg->num_l1_ents = max_contexts;
+
+ l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
+ } else {
+ cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+ cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+ CTXDESC_L2_ENTRIES);
+
+ cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
+ sizeof(*cdcfg->l1_desc),
+ GFP_KERNEL);
+ if (!cdcfg->l1_desc)
+ return -ENOMEM;
+
+ l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ }
+
+ cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+ GFP_KERNEL);
+ if (!cdcfg->cdtab) {
+ dev_warn(smmu->dev, "failed to allocate context descriptor\n");
+ ret = -ENOMEM;
+ goto err_free_l1;
+ }
+
+ return 0;
+
+err_free_l1:
+ if (cdcfg->l1_desc) {
+ devm_kfree(smmu->dev, cdcfg->l1_desc);
+ cdcfg->l1_desc = NULL;
+ }
+ return ret;
+}
+
+static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+ int i;
+ size_t size, l1size;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+ if (cdcfg->l1_desc) {
+ size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+
+ for (i = 0; i < cdcfg->num_l1_ents; i++) {
+ if (!cdcfg->l1_desc[i].l2ptr)
+ continue;
+
+ dmam_free_coherent(smmu->dev, size,
+ cdcfg->l1_desc[i].l2ptr,
+ cdcfg->l1_desc[i].l2ptr_dma);
+ }
+ devm_kfree(smmu->dev, cdcfg->l1_desc);
+ cdcfg->l1_desc = NULL;
+
+ l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ } else {
+ l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+ }
+
+ dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
+ cdcfg->cdtab_dma = 0;
+ cdcfg->cdtab = NULL;
+}
+
+bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
+{
+ bool free;
+ struct arm_smmu_ctx_desc *old_cd;
+
+ if (!cd->asid)
+ return false;
+
+ free = refcount_dec_and_test(&cd->refs);
+ if (free) {
+ old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid);
+ WARN_ON(old_cd != cd);
+ }
+ return free;
+}
+
+/* Stream table manipulation functions */
+static void
+arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
+{
+ u64 val = 0;
+
+ val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
+ val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
+
+ /* See comment in arm_smmu_write_ctx_desc() */
+ WRITE_ONCE(*dst, cpu_to_le64(val));
+}
+
+static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_CFGI_STE,
+ .cfgi = {
+ .sid = sid,
+ .leaf = true,
+ },
+ };
+
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+}
+
+static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
+ __le64 *dst)
+{
+ /*
+ * This is hideously complicated, but we only really care about
+ * three cases at the moment:
+ *
+ * 1. Invalid (all zero) -> bypass/fault (init)
+ * 2. Bypass/fault -> translation/bypass (attach)
+ * 3. Translation/bypass -> bypass/fault (detach)
+ *
+ * Given that we can't update the STE atomically and the SMMU
+ * doesn't read the thing in a defined order, that leaves us
+ * with the following maintenance requirements:
+ *
+ * 1. Update Config, return (init time STEs aren't live)
+ * 2. Write everything apart from dword 0, sync, write dword 0, sync
+ * 3. Update Config, sync
+ */
+ u64 val = le64_to_cpu(dst[0]);
+ bool ste_live = false;
+ struct arm_smmu_device *smmu = NULL;
+ struct arm_smmu_s1_cfg *s1_cfg = NULL;
+ struct arm_smmu_s2_cfg *s2_cfg = NULL;
+ struct arm_smmu_domain *smmu_domain = NULL;
+ struct arm_smmu_cmdq_ent prefetch_cmd = {
+ .opcode = CMDQ_OP_PREFETCH_CFG,
+ .prefetch = {
+ .sid = sid,
+ },
+ };
+
+ if (master) {
+ smmu_domain = master->domain;
+ smmu = master->smmu;
+ }
+
+ if (smmu_domain) {
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ s1_cfg = &smmu_domain->s1_cfg;
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ case ARM_SMMU_DOMAIN_NESTED:
+ s2_cfg = &smmu_domain->s2_cfg;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (val & STRTAB_STE_0_V) {
+ switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
+ case STRTAB_STE_0_CFG_BYPASS:
+ break;
+ case STRTAB_STE_0_CFG_S1_TRANS:
+ case STRTAB_STE_0_CFG_S2_TRANS:
+ ste_live = true;
+ break;
+ case STRTAB_STE_0_CFG_ABORT:
+ BUG_ON(!disable_bypass);
+ break;
+ default:
+ BUG(); /* STE corruption */
+ }
+ }
+
+ /* Nuke the existing STE_0 value, as we're going to rewrite it */
+ val = STRTAB_STE_0_V;
+
+ /* Bypass/fault */
+ if (!smmu_domain || !(s1_cfg || s2_cfg)) {
+ if (!smmu_domain && disable_bypass)
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+ else
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+
+ dst[0] = cpu_to_le64(val);
+ dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+ dst[2] = 0; /* Nuke the VMID */
+ /*
+ * The SMMU can perform negative caching, so we must sync
+ * the STE regardless of whether the old value was live.
+ */
+ if (smmu)
+ arm_smmu_sync_ste_for_sid(smmu, sid);
+ return;
+ }
+
+ if (s1_cfg) {
+ u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
+ STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
+
+ BUG_ON(ste_live);
+ dst[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+ FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
+ FIELD_PREP(STRTAB_STE_1_STRW, strw));
+
+ if (smmu->features & ARM_SMMU_FEAT_STALLS &&
+ !master->stall_enabled)
+ dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
+
+ val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
+ FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
+ }
+
+ if (s2_cfg) {
+ BUG_ON(ste_live);
+ dst[2] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
+#ifdef __BIG_ENDIAN
+ STRTAB_STE_2_S2ENDI |
+#endif
+ STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
+ STRTAB_STE_2_S2R);
+
+ dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
+ }
+
+ if (master->ats_enabled)
+ dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+ STRTAB_STE_1_EATS_TRANS));
+
+ arm_smmu_sync_ste_for_sid(smmu, sid);
+ /* See comment in arm_smmu_write_ctx_desc() */
+ WRITE_ONCE(dst[0], cpu_to_le64(val));
+ arm_smmu_sync_ste_for_sid(smmu, sid);
+
+ /* It's likely that we'll want to use the new STE soon */
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+}
+
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
+{
+ unsigned int i;
+ u64 val = STRTAB_STE_0_V;
+
+ if (disable_bypass && !force)
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+ else
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+
+ for (i = 0; i < nent; ++i) {
+ strtab[0] = cpu_to_le64(val);
+ strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+ strtab[2] = 0;
+ strtab += STRTAB_STE_DWORDS;
+ }
+}
+
+static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
+{
+ size_t size;
+ void *strtab;
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+ struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
+
+ if (desc->l2ptr)
+ return 0;
+
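+ /*
+ * Illustrative sizing, assuming this driver's usual constants
+ * (STRTAB_SPLIT == 8, STRTAB_STE_DWORDS == 8): a leaf table holds
+ * 1 << 8 = 256 STEs of 64 bytes each, so size = 1 << (8 + 3 + 3),
+ * i.e. 16KiB per second-level table.
+ */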
+ size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
+ strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
+
+ desc->span = STRTAB_SPLIT + 1;
+ desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
+ GFP_KERNEL);
+ if (!desc->l2ptr) {
+ dev_err(smmu->dev,
+ "failed to allocate l2 stream table for SID %u\n",
+ sid);
+ return -ENOMEM;
+ }
+
+ arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
+ arm_smmu_write_strtab_l1_desc(strtab, desc);
+ return 0;
+}
+
+static struct arm_smmu_master *
+arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
+{
+ struct rb_node *node;
+ struct arm_smmu_stream *stream;
+
+ lockdep_assert_held(&smmu->streams_mutex);
+
+ node = smmu->streams.rb_node;
+ while (node) {
+ stream = rb_entry(node, struct arm_smmu_stream, node);
+ if (stream->id < sid)
+ node = node->rb_right;
+ else if (stream->id > sid)
+ node = node->rb_left;
+ else
+ return stream->master;
+ }
+
+ return NULL;
+}
+
+/* IRQ and event handlers */
+static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
+{
+ int ret;
+ u32 reason;
+ u32 perm = 0;
+ struct arm_smmu_master *master;
+ bool ssid_valid = evt[0] & EVTQ_0_SSV;
+ u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
+ struct iommu_fault_event fault_evt = { };
+ struct iommu_fault *flt = &fault_evt.fault;
+
+ switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
+ case EVT_ID_TRANSLATION_FAULT:
+ reason = IOMMU_FAULT_REASON_PTE_FETCH;
+ break;
+ case EVT_ID_ADDR_SIZE_FAULT:
+ reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
+ break;
+ case EVT_ID_ACCESS_FAULT:
+ reason = IOMMU_FAULT_REASON_ACCESS;
+ break;
+ case EVT_ID_PERMISSION_FAULT:
+ reason = IOMMU_FAULT_REASON_PERMISSION;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Stage-2 is always pinned at the moment */
+ if (evt[1] & EVTQ_1_S2)
+ return -EFAULT;
+
+ if (evt[1] & EVTQ_1_RnW)
+ perm |= IOMMU_FAULT_PERM_READ;
+ else
+ perm |= IOMMU_FAULT_PERM_WRITE;
+
+ if (evt[1] & EVTQ_1_InD)
+ perm |= IOMMU_FAULT_PERM_EXEC;
+
+ if (evt[1] & EVTQ_1_PnU)
+ perm |= IOMMU_FAULT_PERM_PRIV;
+
+ if (evt[1] & EVTQ_1_STALL) {
+ flt->type = IOMMU_FAULT_PAGE_REQ;
+ flt->prm = (struct iommu_fault_page_request) {
+ .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+ .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
+
+ if (ssid_valid) {
+ flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+ }
+ } else {
+ flt->type = IOMMU_FAULT_DMA_UNRECOV;
+ flt->event = (struct iommu_fault_unrecoverable) {
+ .reason = reason,
+ .flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
+
+ if (ssid_valid) {
+ flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
+ flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+ }
+ }
+
+ mutex_lock(&smmu->streams_mutex);
+ master = arm_smmu_find_master(smmu, sid);
+ if (!master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = iommu_report_device_fault(master->dev, &fault_evt);
+ if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
+ /* Nobody cared, abort the access */
+ struct iommu_page_response resp = {
+ .pasid = flt->prm.pasid,
+ .grpid = flt->prm.grpid,
+ .code = IOMMU_PAGE_RESP_FAILURE,
+ };
+ arm_smmu_page_response(master->dev, &fault_evt, &resp);
+ }
+
+out_unlock:
+ mutex_unlock(&smmu->streams_mutex);
+ return ret;
+}
+
+static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
+{
+ int i, ret;
+ struct arm_smmu_device *smmu = dev;
+ struct arm_smmu_queue *q = &smmu->evtq.q;
+ struct arm_smmu_ll_queue *llq = &q->llq;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ u64 evt[EVTQ_ENT_DWORDS];
+
+ do {
+ while (!queue_remove_raw(q, evt)) {
+ u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
+
+ ret = arm_smmu_handle_evt(smmu, evt);
+ if (!ret || !__ratelimit(&rs))
+ continue;
+
+ dev_info(smmu->dev, "event 0x%02x received:\n", id);
+ for (i = 0; i < ARRAY_SIZE(evt); ++i)
+ dev_info(smmu->dev, "\t0x%016llx\n",
+ (unsigned long long)evt[i]);
+
+ cond_resched();
+ }
+
+ /*
+ * Not much we can do on overflow, so scream and pretend we're
+ * trying harder.
+ */
+ if (queue_sync_prod_in(q) == -EOVERFLOW)
+ dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
+ } while (!queue_empty(llq));
+
+ /* Sync our overflow flag, as we believe we're up to speed */
+ queue_sync_cons_ovf(q);
+ return IRQ_HANDLED;
+}
+
+static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
+{
+ u32 sid, ssid;
+ u16 grpid;
+ bool ssv, last;
+
+ sid = FIELD_GET(PRIQ_0_SID, evt[0]);
+ ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
+ ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : IOMMU_NO_PASID;
+ last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
+ grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
+
+ dev_info(smmu->dev, "unexpected PRI request received:\n");
+ dev_info(smmu->dev,
+ "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
+ sid, ssid, grpid, last ? "L" : "",
+ evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
+ evt[0] & PRIQ_0_PERM_READ ? "R" : "",
+ evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
+ evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
+ evt[1] & PRIQ_1_ADDR_MASK);
+
+ if (last) {
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_PRI_RESP,
+ .substream_valid = ssv,
+ .pri = {
+ .sid = sid,
+ .ssid = ssid,
+ .grpid = grpid,
+ .resp = PRI_RESP_DENY,
+ },
+ };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ }
+}
+
+static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
+{
+ struct arm_smmu_device *smmu = dev;
+ struct arm_smmu_queue *q = &smmu->priq.q;
+ struct arm_smmu_ll_queue *llq = &q->llq;
+ u64 evt[PRIQ_ENT_DWORDS];
+
+ do {
+ while (!queue_remove_raw(q, evt))
+ arm_smmu_handle_ppr(smmu, evt);
+
+ if (queue_sync_prod_in(q) == -EOVERFLOW)
+ dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
+ } while (!queue_empty(llq));
+
+ /* Sync our overflow flag, as we believe we're up to speed */
+ queue_sync_cons_ovf(q);
+ return IRQ_HANDLED;
+}
+
+static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
+
+static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
+{
+ u32 gerror, gerrorn, active;
+ struct arm_smmu_device *smmu = dev;
+
+ gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
+ gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
+
+ active = gerror ^ gerrorn;
+ if (!(active & GERROR_ERR_MASK))
+ return IRQ_NONE; /* No errors pending */
+
+ dev_warn(smmu->dev,
+ "unexpected global error reported (0x%08x), this could be serious\n",
+ active);
+
+ if (active & GERROR_SFM_ERR) {
+ dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
+ arm_smmu_device_disable(smmu);
+ }
+
+ if (active & GERROR_MSI_GERROR_ABT_ERR)
+ dev_warn(smmu->dev, "GERROR MSI write aborted\n");
+
+ if (active & GERROR_MSI_PRIQ_ABT_ERR)
+ dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
+
+ if (active & GERROR_MSI_EVTQ_ABT_ERR)
+ dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
+
+ if (active & GERROR_MSI_CMDQ_ABT_ERR)
+ dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
+
+ if (active & GERROR_PRIQ_ABT_ERR)
+ dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
+
+ if (active & GERROR_EVTQ_ABT_ERR)
+ dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
+
+ if (active & GERROR_CMDQ_ERR)
+ arm_smmu_cmdq_skip_err(smmu);
+
+ writel(gerror, smmu->base + ARM_SMMU_GERRORN);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
+{
+ struct arm_smmu_device *smmu = dev;
+
+ arm_smmu_evtq_thread(irq, dev);
+ if (smmu->features & ARM_SMMU_FEAT_PRI)
+ arm_smmu_priq_thread(irq, dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
+{
+ arm_smmu_gerror_handler(irq, dev);
+ return IRQ_WAKE_THREAD;
+}
+
+static void
+arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ size_t log2_span;
+ size_t span_mask;
+ /* ATC invalidates are always on 4096-byte pages */
+ size_t inval_grain_shift = 12;
+ unsigned long page_start, page_end;
+
+ /*
+ * ATS and PASID:
+ *
+ * If substream_valid is clear, the PCIe TLP is sent without a PASID
+ * prefix. In that case all ATC entries within the address range are
+ * invalidated, including those that were requested with a PASID! There
+ * is no way to invalidate only entries without PASID.
+ *
+ * When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID
+ * traffic), translation requests without PASID create ATC entries
+ * without PASID, which must be invalidated with substream_valid clear.
+ * This has the unpleasant side-effect of invalidating all PASID-tagged
+ * ATC entries within the address range.
+ */
+ *cmd = (struct arm_smmu_cmdq_ent) {
+ .opcode = CMDQ_OP_ATC_INV,
+ .substream_valid = (ssid != IOMMU_NO_PASID),
+ .atc.ssid = ssid,
+ };
+
+ if (!size) {
+ cmd->atc.size = ATC_INV_SIZE_ALL;
+ return;
+ }
+
+ page_start = iova >> inval_grain_shift;
+ page_end = (iova + size - 1) >> inval_grain_shift;
+
+ /*
+ * In an ATS Invalidate Request, the address must be aligned on the
+ * range size, which must be a power of two number of page sizes. We
+ * thus have to choose between grossly over-invalidating the region, or
+ * splitting the invalidation into multiple commands. For simplicity
+ * we'll go with the first solution, but should refine it in the future
+ * if multiple commands are shown to be more efficient.
+ *
+ * Find the smallest power of two that covers the range. The most
+ * significant differing bit between the start and end addresses,
+ * fls(start ^ end), indicates the required span. For example:
+ *
+ * We want to invalidate pages [8; 11]. This is already the ideal range:
+ * x = 0b1000 ^ 0b1011 = 0b11
+ * span = 1 << fls(x) = 4
+ *
+ * To invalidate pages [7; 10], we need to invalidate [0; 15]:
+ * x = 0b0111 ^ 0b1010 = 0b1101
+ * span = 1 << fls(x) = 16
+ */
+ log2_span = fls_long(page_start ^ page_end);
+ span_mask = (1ULL << log2_span) - 1;
+
+ page_start &= ~span_mask;
+
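+ /*
+ * Continuing the [7; 10] example above: log2_span = 4 and
+ * span_mask = 15, so page_start is aligned down to 0 and the
+ * command encodes addr = 0 with size = 4 (2^4 = 16 pages).
+ */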
+ cmd->atc.addr = page_start << inval_grain_shift;
+ cmd->atc.size = log2_span;
+}
+
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
+{
+ int i;
+ struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_cmdq_batch cmds;
+
+ arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);
+
+ cmds.num = 0;
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
+ arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
+ }
+
+ return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
+}
+
+int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
+ unsigned long iova, size_t size)
+{
+ int i;
+ unsigned long flags;
+ struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_master *master;
+ struct arm_smmu_cmdq_batch cmds;
+
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
+ return 0;
+
+ /*
+ * Ensure that we've completed prior invalidation of the main TLBs
+ * before we read 'nr_ats_masters' in case of a concurrent call to
+ * arm_smmu_enable_ats():
+ *
+ * // unmap() // arm_smmu_enable_ats()
+ * TLBI+SYNC atomic_inc(&nr_ats_masters);
+ * smp_mb(); [...]
+ * atomic_read(&nr_ats_masters); pci_enable_ats() // writel()
+ *
+ * Ensures that we always see the incremented 'nr_ats_masters' count if
+ * ATS was enabled at the PCI device before completion of the TLBI.
+ */
+ smp_mb();
+ if (!atomic_read(&smmu_domain->nr_ats_masters))
+ return 0;
+
+ arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
+
+ cmds.num = 0;
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ if (!master->ats_enabled)
+ continue;
+
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
+ arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
+ }
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
+}
+
+/* IO_PGTABLE API */
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cmdq_ent cmd;
+
+ /*
+ * NOTE: when io-pgtable is in non-strict mode, we may get here with
+ * PTEs previously cleared by unmaps on the current CPU not yet visible
+ * to the SMMU. We are relying on the dma_wmb() implicit during cmd
+ * insertion to guarantee those are observed before the TLBI. Do be
+ * careful, 007.
+ */
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
+ } else {
+ cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+ }
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
+}
+
+static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
+ unsigned long iova, size_t size,
+ size_t granule,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ unsigned long end = iova + size, num_pages = 0, tg = 0;
+ size_t inv_range = granule;
+ struct arm_smmu_cmdq_batch cmds;
+
+ if (!size)
+ return;
+
+ if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+ /* Get the leaf page size */
+ tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+
+ num_pages = size >> tg;
+
+ /* Convert page size of 12,14,16 (log2) to 1,2,3 */
+ cmd->tlbi.tg = (tg - 10) / 2;
+
+ /*
+ * Determine what level the granule is at. For non-leaf, both
+ * io-pgtable and SVA pass a nominal last-level granule because
+ * they don't know what level(s) actually apply, so ignore that
+ * and leave TTL=0. However for various errata reasons we still
+ * want to use a range command, so avoid the SVA corner case
+ * where both scale and num could be 0 as well.
+ */
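+ /*
+ * Worked example for a 4KiB leaf granule (tg = 12): the TG field
+ * becomes (12 - 10) / 2 = 1. A 2MiB block (ilog2(granule) = 21)
+ * then gives TTL = 4 - (21 - 3) / (12 - 3) = 2, while a 4KiB page
+ * gives TTL = 4 - (12 - 3) / 9 = 3, i.e. the last level.
+ */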
+ if (cmd->tlbi.leaf)
+ cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+ else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+ num_pages++;
+ }
+
+ cmds.num = 0;
+
+ while (iova < end) {
+ if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+ /*
+ * On each iteration of the loop, the range is 5 bits
+ * worth of the aligned size remaining.
+ * The range in pages is:
+ *
+ * range = (num_pages & (0x1f << __ffs(num_pages)))
+ */
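+ /*
+ * E.g. num_pages = 35 (0b100011) with 4KiB pages: the first
+ * pass sends scale = 0, num = 3 (12KiB), leaving 32 pages; the
+ * second sends scale = 5, num = 1 (128KiB), leaving 0.
+ */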
+ unsigned long scale, num;
+
+ /* Determine the power of 2 multiple number of pages */
+ scale = __ffs(num_pages);
+ cmd->tlbi.scale = scale;
+
+ /* Determine how many chunks of 2^scale size we have */
+ num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
+ cmd->tlbi.num = num - 1;
+
+ /* range is num * 2^scale * pgsize */
+ inv_range = num << (scale + tg);
+
+ /* Clear out the lower order bits for the next iteration */
+ num_pages -= num << scale;
+ }
+
+ cmd->tlbi.addr = iova;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
+ iova += inv_range;
+ }
+ arm_smmu_cmdq_batch_submit(smmu, &cmds);
+}
+
+static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .tlbi = {
+ .leaf = leaf,
+ },
+ };
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
+ cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
+ } else {
+ cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ }
+ __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
+
+ /*
+ * Unfortunately, this can't be leaf-only since we may have
+ * zapped an entire table.
+ */
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
+}
+
+void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA,
+ .tlbi = {
+ .asid = asid,
+ .leaf = leaf,
+ },
+ };
+
+ __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
+}
+
+static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct iommu_domain *domain = &smmu_domain->domain;
+
+ iommu_iotlb_gather_add_page(domain, gather, iova, granule);
+}
+
+static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
+}
+
+static const struct iommu_flush_ops arm_smmu_flush_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_add_page = arm_smmu_tlb_inv_page_nosync,
+};
+
+/* IOMMU API */
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ /* Assume that a coherent TCU implies coherent TBUs */
+ return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
+ case IOMMU_CAP_NOEXEC:
+ case IOMMU_CAP_DEFERRED_FLUSH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+{
+ struct arm_smmu_domain *smmu_domain;
+
+ if (type == IOMMU_DOMAIN_SVA)
+ return arm_smmu_sva_domain_alloc();
+
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
+ return NULL;
+
+ /*
+ * Allocate the domain and initialise some of its data structures.
+ * We can't really do anything meaningful until we've added a
+ * master.
+ */
+ smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+ if (!smmu_domain)
+ return NULL;
+
+ mutex_init(&smmu_domain->init_mutex);
+ INIT_LIST_HEAD(&smmu_domain->devices);
+ spin_lock_init(&smmu_domain->devices_lock);
+ INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
+
+ return &smmu_domain->domain;
+}
+
+static void arm_smmu_domain_free(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+
+ /* Free the CD and ASID, if we allocated them */
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+
+ /* Prevent SVA from touching the CD while we're freeing it */
+ mutex_lock(&arm_smmu_asid_lock);
+ if (cfg->cdcfg.cdtab)
+ arm_smmu_free_cd_tables(smmu_domain);
+ arm_smmu_free_asid(&cfg->cd);
+ mutex_unlock(&arm_smmu_asid_lock);
+ } else {
+ struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+
+ if (cfg->vmid)
+ ida_free(&smmu->vmid_map, cfg->vmid);
+ }
+
+ kfree(smmu_domain);
+}
+
+static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master *master,
+ struct io_pgtable_cfg *pgtbl_cfg)
+{
+ int ret;
+ u32 asid;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+
+ refcount_set(&cfg->cd.refs, 1);
+
+ /* Prevent SVA from modifying the ASID until it is written to the CD */
+ mutex_lock(&arm_smmu_asid_lock);
+ ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+ if (ret)
+ goto out_unlock;
+
+ cfg->s1cdmax = master->ssid_bits;
+
+ smmu_domain->stall_enabled = master->stall_enabled;
+
+ ret = arm_smmu_alloc_cd_tables(smmu_domain);
+ if (ret)
+ goto out_free_asid;
+
+ cfg->cd.asid = (u16)asid;
+ cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
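+ /*
+ * io-pgtable sets tsz = 64 - ias, so e.g. a 48-bit input address
+ * space yields T0SZ = 16. EPD1 disables TTBR1 walks, as only
+ * TTBR0 is programmed here.
+ */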
+ cfg->cd.tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+ CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
+ cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+
+ /*
+ * Note that this will end up calling arm_smmu_sync_cd() before
+ * the master has been added to the devices list for this domain.
+ * This isn't an issue because the STE hasn't been installed yet.
+ */
+ ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
+ if (ret)
+ goto out_free_cd_tables;
+
+ mutex_unlock(&arm_smmu_asid_lock);
+ return 0;
+
+out_free_cd_tables:
+ arm_smmu_free_cd_tables(smmu_domain);
+out_free_asid:
+ arm_smmu_free_asid(&cfg->cd);
+out_unlock:
+ mutex_unlock(&arm_smmu_asid_lock);
+ return ret;
+}
+
+static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master *master,
+ struct io_pgtable_cfg *pgtbl_cfg)
+{
+ int vmid;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
+
+ /* Reserve VMID 0 for stage-2 bypass STEs */
+ vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
+ GFP_KERNEL);
+ if (vmid < 0)
+ return vmid;
+
+ vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ cfg->vmid = (u16)vmid;
+ cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
+ return 0;
+}
+
+static int arm_smmu_domain_finalise(struct iommu_domain *domain,
+ struct arm_smmu_master *master)
+{
+ int ret;
+ unsigned long ias, oas;
+ enum io_pgtable_fmt fmt;
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable_ops *pgtbl_ops;
+ int (*finalise_stage_fn)(struct arm_smmu_domain *,
+ struct arm_smmu_master *,
+ struct io_pgtable_cfg *);
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ return 0;
+ }
+
+ /* Restrict the stage to what we can actually support */
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
+ ias = min_t(unsigned long, ias, VA_BITS);
+ oas = smmu->ias;
+ fmt = ARM_64_LPAE_S1;
+ finalise_stage_fn = arm_smmu_domain_finalise_s1;
+ break;
+ case ARM_SMMU_DOMAIN_NESTED:
+ case ARM_SMMU_DOMAIN_S2:
+ ias = smmu->ias;
+ oas = smmu->oas;
+ fmt = ARM_64_LPAE_S2;
+ finalise_stage_fn = arm_smmu_domain_finalise_s2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = smmu->pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
+ .tlb = &arm_smmu_flush_ops,
+ .iommu_dev = smmu->dev,
+ };
+
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+ if (!pgtbl_ops)
+ return -ENOMEM;
+
+ domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
+ domain->geometry.force_aperture = true;
+
+ ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
+ if (ret < 0) {
+ free_io_pgtable_ops(pgtbl_ops);
+ return ret;
+ }
+
+ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
+}
+
+static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+{
+ __le64 *step;
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+ struct arm_smmu_strtab_l1_desc *l1_desc;
+ int idx;
+
+ /* Two-level walk */
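+ /*
+ * The SID splits into an L1 index (sid >> STRTAB_SPLIT) and an
+ * L2 index (sid & ((1 << STRTAB_SPLIT) - 1)); with the usual
+ * split of 8, SID 0x1234 selects L1 descriptor 0x12 and STE
+ * 0x34 within that 256-entry leaf table.
+ */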
+ idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
+ l1_desc = &cfg->l1_desc[idx];
+ idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
+ step = &l1_desc->l2ptr[idx];
+ } else {
+ /* Simple linear lookup */
+ step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
+ }
+
+ return step;
+}
+
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
+{
+ int i, j;
+ struct arm_smmu_device *smmu = master->smmu;
+
+ for (i = 0; i < master->num_streams; ++i) {
+ u32 sid = master->streams[i].id;
+ __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+
+ /* Bridged PCI devices may end up with duplicated IDs */
+ for (j = 0; j < i; j++)
+ if (master->streams[j].id == sid)
+ break;
+ if (j < i)
+ continue;
+
+ arm_smmu_write_strtab_ent(master, sid, step);
+ }
+}
+
+static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
+{
+ struct device *dev = master->dev;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (!(smmu->features & ARM_SMMU_FEAT_ATS))
+ return false;
+
+ if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS))
+ return false;
+
+ return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
+}
+
+static void arm_smmu_enable_ats(struct arm_smmu_master *master)
+{
+ size_t stu;
+ struct pci_dev *pdev;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_domain *smmu_domain = master->domain;
+
+ /* Don't enable ATS at the endpoint if it's not enabled in the STE */
+ if (!master->ats_enabled)
+ return;
+
+ /* Smallest Translation Unit: log2 of the smallest supported granule */
+ stu = __ffs(smmu->pgsize_bitmap);
+ pdev = to_pci_dev(master->dev);
+
+ atomic_inc(&smmu_domain->nr_ats_masters);
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
+ if (pci_enable_ats(pdev, stu))
+ dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
+}
+
+static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+{
+ struct arm_smmu_domain *smmu_domain = master->domain;
+
+ if (!master->ats_enabled)
+ return;
+
+ pci_disable_ats(to_pci_dev(master->dev));
+ /*
+ * Ensure ATS is disabled at the endpoint before we issue the
+ * ATC invalidation via the SMMU.
+ */
+ wmb();
+ arm_smmu_atc_inv_master(master);
+ atomic_dec(&smmu_domain->nr_ats_masters);
+}
+
+static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
+{
+ int ret;
+ int features;
+ int num_pasids;
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(master->dev))
+ return -ENODEV;
+
+ pdev = to_pci_dev(master->dev);
+
+ features = pci_pasid_features(pdev);
+ if (features < 0)
+ return features;
+
+ num_pasids = pci_max_pasids(pdev);
+ if (num_pasids <= 0)
+ return num_pasids;
+
+ ret = pci_enable_pasid(pdev, features);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PASID\n");
+ return ret;
+ }
+
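+ /*
+ * E.g. an endpoint advertising the architectural maximum of
+ * 2^20 PASIDs gives ilog2(num_pasids) = 20, which is then
+ * capped to what the SMMU's SSID space can represent.
+ */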
+ master->ssid_bits = min_t(u8, ilog2(num_pasids),
+ master->smmu->ssid_bits);
+ return 0;
+}
+
+static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(master->dev))
+ return;
+
+ pdev = to_pci_dev(master->dev);
+
+ if (!pdev->pasid_enabled)
+ return;
+
+ master->ssid_bits = 0;
+ pci_disable_pasid(pdev);
+}
+
+static void arm_smmu_detach_dev(struct arm_smmu_master *master)
+{
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = master->domain;
+
+ if (!smmu_domain)
+ return;
+
+ arm_smmu_disable_ats(master);
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_del(&master->domain_head);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ master->domain = NULL;
+ master->ats_enabled = false;
+ arm_smmu_install_ste_for_dev(master);
+}
+
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_master *master;
+
+ if (!fwspec)
+ return -ENOENT;
+
+ master = dev_iommu_priv_get(dev);
+ smmu = master->smmu;
+
+ /*
+ * Checking that SVA is disabled ensures that this device isn't bound to
+ * any mm, and can be safely detached from its old domain. Bonds cannot
+ * be removed concurrently since we're holding the group mutex.
+ */
+ if (arm_smmu_master_sva_enabled(master)) {
+ dev_err(dev, "cannot attach - SVA enabled\n");
+ return -EBUSY;
+ }
+
+ arm_smmu_detach_dev(master);
+
+ mutex_lock(&smmu_domain->init_mutex);
+
+ if (!smmu_domain->smmu) {
+ smmu_domain->smmu = smmu;
+ ret = arm_smmu_domain_finalise(domain, master);
+ if (ret) {
+ smmu_domain->smmu = NULL;
+ goto out_unlock;
+ }
+ } else if (smmu_domain->smmu != smmu) {
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+ master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+ smmu_domain->stall_enabled != master->stall_enabled) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ master->domain = smmu_domain;
+
+ /*
+ * The SMMU does not support enabling ATS with bypass. When the STE is
+ * in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and
+ * Translated transactions are denied as though ATS is disabled for the
+ * stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
+ * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ */
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
+ master->ats_enabled = arm_smmu_ats_supported(master);
+
+ arm_smmu_install_ste_for_dev(master);
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_add(&master->domain_head, &smmu_domain->devices);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_enable_ats(master);
+
+out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
+}
+
+static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+
+ if (!ops)
+ return -ENODEV;
+
+ return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
+}
+
+static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
+}
+
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->smmu)
+ arm_smmu_tlb_inv_context(smmu_domain);
+}
+
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (!gather->pgsize)
+ return;
+
+ arm_smmu_tlb_inv_range_domain(gather->start,
+ gather->end - gather->start + 1,
+ gather->pgsize, true, smmu_domain);
+}
+
+static phys_addr_t
+arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
+{
+ struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ return ops->iova_to_phys(ops, iova);
+}
+
+static struct platform_driver arm_smmu_driver;
+
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
+ fwnode);
+ put_device(dev);
+ return dev ? dev_get_drvdata(dev) : NULL;
+}
+
+static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
+{
+ unsigned long limit = smmu->strtab_cfg.num_l1_ents;
+
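+ /*
+ * For a two-level table each L1 descriptor covers a full leaf
+ * of 1 << STRTAB_SPLIT STEs, so e.g. 2^17 L1 entries with a
+ * split of 8 accept SIDs below 2^25.
+ */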
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ limit *= 1UL << STRTAB_SPLIT;
+
+ return sid < limit;
+}
+
+static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
+{
+ /* Check the SIDs are in range of the SMMU and our stream table */
+ if (!arm_smmu_sid_in_range(smmu, sid))
+ return -ERANGE;
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ return arm_smmu_init_l2_strtab(smmu, sid);
+
+ return 0;
+}
+
+static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+ struct arm_smmu_master *master)
+{
+ int i;
+ int ret = 0;
+ struct arm_smmu_stream *new_stream, *cur_stream;
+ struct rb_node **new_node, *parent_node = NULL;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
+ GFP_KERNEL);
+ if (!master->streams)
+ return -ENOMEM;
+ master->num_streams = fwspec->num_ids;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u32 sid = fwspec->ids[i];
+
+ new_stream = &master->streams[i];
+ new_stream->id = sid;
+ new_stream->master = master;
+
+ ret = arm_smmu_init_sid_strtab(smmu, sid);
+ if (ret)
+ break;
+
+ /* Insert into SID tree */
+ new_node = &(smmu->streams.rb_node);
+ while (*new_node) {
+ cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
+ node);
+ parent_node = *new_node;
+ if (cur_stream->id > new_stream->id) {
+ new_node = &((*new_node)->rb_left);
+ } else if (cur_stream->id < new_stream->id) {
+ new_node = &((*new_node)->rb_right);
+ } else {
+ dev_warn(master->dev,
+ "stream %u already in tree\n",
+ cur_stream->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ if (ret)
+ break;
+
+ rb_link_node(&new_stream->node, parent_node, new_node);
+ rb_insert_color(&new_stream->node, &smmu->streams);
+ }
+
+ if (ret) {
+ for (i--; i >= 0; i--)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ kfree(master->streams);
+ }
+ mutex_unlock(&smmu->streams_mutex);
+
+ return ret;
+}
+
+static void arm_smmu_remove_master(struct arm_smmu_master *master)
+{
+ int i;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ if (!smmu || !master->streams)
+ return;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ mutex_unlock(&smmu->streams_mutex);
+
+ kfree(master->streams);
+}
+
+static struct iommu_ops arm_smmu_ops;
+
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
+{
+ int ret;
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_master *master;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (!fwspec || fwspec->ops != &arm_smmu_ops)
+ return ERR_PTR(-ENODEV);
+
+ if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
+ return ERR_PTR(-EBUSY);
+
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
+ if (!smmu)
+ return ERR_PTR(-ENODEV);
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return ERR_PTR(-ENOMEM);
+
+ master->dev = dev;
+ master->smmu = smmu;
+ INIT_LIST_HEAD(&master->bonds);
+ dev_iommu_priv_set(dev, master);
+
+ ret = arm_smmu_insert_master(smmu, master);
+ if (ret)
+ goto err_free_master;
+
+ device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
+ master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
+
+ /*
+ * Note that PASID must be enabled before, and disabled after ATS:
+ * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
+ *
+ * Behavior is undefined if this bit is Set and the value of the PASID
+ * Enable, Execute Requested Enable, or Privileged Mode Requested bits
+ * are changed.
+ */
+ arm_smmu_enable_pasid(master);
+
+ if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
+ master->ssid_bits = min_t(u8, master->ssid_bits,
+ CTXDESC_LINEAR_CDMAX);
+
+ if ((smmu->features & ARM_SMMU_FEAT_STALLS &&
+ device_property_read_bool(dev, "dma-can-stall")) ||
+ smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+ master->stall_enabled = true;
+
+ return &smmu->iommu;
+
+err_free_master:
+ kfree(master);
+ dev_iommu_priv_set(dev, NULL);
+ return ERR_PTR(ret);
+}
+
+static void arm_smmu_release_device(struct device *dev)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (WARN_ON(arm_smmu_master_sva_enabled(master)))
+ iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ arm_smmu_detach_dev(master);
+ arm_smmu_disable_pasid(master);
+ arm_smmu_remove_master(master);
+ kfree(master);
+}
+
+static struct iommu_group *arm_smmu_device_group(struct device *dev)
+{
+ struct iommu_group *group;
+
+ /*
+ * We don't support devices sharing stream IDs other than PCI RID
+ * aliases, since the necessary ID-to-device lookup becomes rather
+ * impractical given a potential sparse 32-bit stream ID space.
+ */
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+ group = generic_device_group(dev);
+
+ return group;
+}
+
+static int arm_smmu_enable_nesting(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret = 0;
+
+ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ return ret;
+}
+
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ return iommu_fwspec_add_ids(dev, args->args, 1);
+}
+
+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
+}
+
+static int arm_smmu_dev_enable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (!master)
+ return -ENODEV;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ if (!arm_smmu_master_iopf_supported(master))
+ return -EINVAL;
+ if (master->iopf_enabled)
+ return -EBUSY;
+ master->iopf_enabled = true;
+ return 0;
+ case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_supported(master))
+ return -EINVAL;
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
+ return arm_smmu_master_enable_sva(master);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int arm_smmu_dev_disable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (!master)
+ return -EINVAL;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ if (!master->iopf_enabled)
+ return -EINVAL;
+ if (master->sva_enabled)
+ return -EBUSY;
+ master->iopf_enabled = false;
+ return 0;
+ case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_enabled(master))
+ return -EINVAL;
+ return arm_smmu_master_disable_sva(master);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * The HiSilicon PCIe tune and trace device can be used to trace TLP headers
+ * on the PCIe link and save the data to memory by DMA. The hardware is
+ * restricted to identity mapping only.
+ */
+#define IS_HISI_PTT_DEVICE(pdev) ((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
+ (pdev)->device == 0xa12e)
+
+static int arm_smmu_def_domain_type(struct device *dev)
+{
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (IS_HISI_PTT_DEVICE(pdev))
+ return IOMMU_DOMAIN_IDENTITY;
+ }
+
+ return 0;
+}
+
+static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
+ if (WARN_ON(IS_ERR(domain)) || !domain)
+ return;
+
+ arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
+}
+
+static struct iommu_ops arm_smmu_ops = {
+ .capable = arm_smmu_capable,
+ .domain_alloc = arm_smmu_domain_alloc,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
+ .device_group = arm_smmu_device_group,
+ .of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .remove_dev_pasid = arm_smmu_remove_dev_pasid,
+ .dev_enable_feat = arm_smmu_dev_enable_feature,
+ .dev_disable_feat = arm_smmu_dev_disable_feature,
+ .page_response = arm_smmu_page_response,
+ .def_domain_type = arm_smmu_def_domain_type,
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
+ .owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = arm_smmu_attach_dev,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
+ .iotlb_sync = arm_smmu_iotlb_sync,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .free = arm_smmu_domain_free,
+ }
+};
+
+/* Probing and initialisation functions */
+static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
+ struct arm_smmu_queue *q,
+ void __iomem *page,
+ unsigned long prod_off,
+ unsigned long cons_off,
+ size_t dwords, const char *name)
+{
+ size_t qsz;
+
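+ /*
+ * A queue of 2^max_n_shift entries, each of 'dwords' 64-bit
+ * words, needs (1 << max_n_shift) * dwords * 8 bytes; halve the
+ * entry count until the allocation succeeds or the queue would
+ * already fit within a single page, at which point failure is
+ * final.
+ */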
+ do {
+ qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
+ q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
+ GFP_KERNEL);
+ if (q->base || qsz < PAGE_SIZE)
+ break;
+
+ q->llq.max_n_shift--;
+ } while (1);
+
+ if (!q->base) {
+ dev_err(smmu->dev,
+ "failed to allocate queue (0x%zx bytes) for %s\n",
+ qsz, name);
+ return -ENOMEM;
+ }
+
+ if (!WARN_ON(q->base_dma & (qsz - 1))) {
+ dev_info(smmu->dev, "allocated %u entries for %s\n",
+ 1 << q->llq.max_n_shift, name);
+ }
+
+ q->prod_reg = page + prod_off;
+ q->cons_reg = page + cons_off;
+ q->ent_dwords = dwords;
+
+ q->q_base = Q_BASE_RWA;
+ q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
+ q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
+
+ q->llq.prod = q->llq.cons = 0;
+ return 0;
+}
+
+static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
+{
+ struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
+
+ atomic_set(&cmdq->owner_prod, 0);
+ atomic_set(&cmdq->lock, 0);
+
+ cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
+ GFP_KERNEL);
+ if (!cmdq->valid_map)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ /* cmdq */
+ ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
+ ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS,
+ CMDQ_ENT_DWORDS, "cmdq");
+ if (ret)
+ return ret;
+
+ ret = arm_smmu_cmdq_init(smmu);
+ if (ret)
+ return ret;
+
+ /* evtq */
+ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
+ ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS,
+ EVTQ_ENT_DWORDS, "evtq");
+ if (ret)
+ return ret;
+
+ if ((smmu->features & ARM_SMMU_FEAT_SVA) &&
+ (smmu->features & ARM_SMMU_FEAT_STALLS)) {
+ smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
+ if (!smmu->evtq.iopf)
+ return -ENOMEM;
+ }
+
+ /* priq */
+ if (!(smmu->features & ARM_SMMU_FEAT_PRI))
+ return 0;
+
+ return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
+ ARM_SMMU_PRIQ_PROD, ARM_SMMU_PRIQ_CONS,
+ PRIQ_ENT_DWORDS, "priq");
+}
+
+static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
+{
+ unsigned int i;
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+ void *strtab = smmu->strtab_cfg.strtab;
+
+ cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
+ sizeof(*cfg->l1_desc), GFP_KERNEL);
+ if (!cfg->l1_desc)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_l1_ents; ++i) {
+ arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
+ strtab += STRTAB_L1_DESC_DWORDS << 3;
+ }
+
+ return 0;
+}
+
+static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
+{
+ void *strtab;
+ u64 reg;
+ u32 size, l1size;
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+
+ /* Calculate the L1 size, capped to the SIDSIZE. */
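+ /*
+ * E.g. with STRTAB_L1_SZ_SHIFT == 20 and one dword per L1
+ * descriptor this gives size = 20 - 3 = 17, i.e. up to 2^17 L1
+ * entries (a 1MiB L1 table) spanning 17 + STRTAB_SPLIT = 25
+ * bits of SID.
+ */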
+ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+ size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+ cfg->num_l1_ents = 1 << size;
+
+ size += STRTAB_SPLIT;
+ if (size < smmu->sid_bits)
+ dev_warn(smmu->dev,
+ "2-level strtab only covers %u/%u bits of SID\n",
+ size, smmu->sid_bits);
+
+ l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+ strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
+ GFP_KERNEL);
+ if (!strtab) {
+ dev_err(smmu->dev,
+ "failed to allocate l1 stream table (%u bytes)\n",
+ l1size);
+ return -ENOMEM;
+ }
+ cfg->strtab = strtab;
+
+ /* Configure strtab_base_cfg for 2 levels */
+ reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
+ reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
+ reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
+ cfg->strtab_base_cfg = reg;
+
+ return arm_smmu_init_l1_strtab(smmu);
+}
+
+static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
+{
+ void *strtab;
+ u64 reg;
+ u32 size;
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+
+ size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
+ strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+ GFP_KERNEL);
+ if (!strtab) {
+ dev_err(smmu->dev,
+ "failed to allocate linear stream table (%u bytes)\n",
+ size);
+ return -ENOMEM;
+ }
+ cfg->strtab = strtab;
+ cfg->num_l1_ents = 1 << smmu->sid_bits;
+
+ /* Configure strtab_base_cfg for a linear table covering all SIDs */
+ reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
+ reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
+ cfg->strtab_base_cfg = reg;
+
+ arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
+ return 0;
+}
+
+static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
+{
+ u64 reg;
+ int ret;
+
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ ret = arm_smmu_init_strtab_2lvl(smmu);
+ else
+ ret = arm_smmu_init_strtab_linear(smmu);
+
+ if (ret)
+ return ret;
+
+ /* Set the strtab base address */
+ reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
+ reg |= STRTAB_BASE_RA;
+ smmu->strtab_cfg.strtab_base = reg;
+
+ ida_init(&smmu->vmid_map);
+
+ return 0;
+}
+
+static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ mutex_init(&smmu->streams_mutex);
+ smmu->streams = RB_ROOT;
+
+ ret = arm_smmu_init_queues(smmu);
+ if (ret)
+ return ret;
+
+ return arm_smmu_init_strtab(smmu);
+}
+
+static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
+ unsigned int reg_off, unsigned int ack_off)
+{
+ u32 reg;
+
+ writel_relaxed(val, smmu->base + reg_off);
+ return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+}
+
+/* GBPA is "special" */
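+/*
+ * Unlike the CR registers it has no separate ACK register: software must
+ * poll GBPA.Update until clear, write the new fields with Update set, then
+ * poll again for the write to take effect.
+ */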
+static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
+{
+ int ret;
+ u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
+
+ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ reg &= ~clr;
+ reg |= set;
+ writel_relaxed(reg | GBPA_UPDATE, gbpa);
+ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+
+ if (ret)
+ dev_err(smmu->dev, "GBPA not responding to update\n");
+ return ret;
+}
+
+static void arm_smmu_free_msis(void *data)
+{
+ struct device *dev = data;
+ platform_msi_domain_free_irqs(dev);
+}
+
+static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ phys_addr_t doorbell;
+ struct device *dev = msi_desc_to_dev(desc);
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+ phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index];
+
+ doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
+ doorbell &= MSI_CFG0_ADDR_MASK;
+
+ writeq_relaxed(doorbell, smmu->base + cfg[0]);
+ writel_relaxed(msg->data, smmu->base + cfg[1]);
+ writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
+}
+
+static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
+{
+ int ret, nvec = ARM_SMMU_MAX_MSIS;
+ struct device *dev = smmu->dev;
+
+ /* Clear the MSI address regs */
+ writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
+ writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
+
+ if (smmu->features & ARM_SMMU_FEAT_PRI)
+ writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
+ else
+ nvec--;
+
+ if (!(smmu->features & ARM_SMMU_FEAT_MSI))
+ return;
+
+ if (!dev->msi.domain) {
+ dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
+ return;
+ }
+
+ /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
+ ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+ if (ret) {
+ dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
+ return;
+ }
+
+ smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
+ smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
+ smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
+
+ /* Add callback to free MSIs on teardown */
+ devm_add_action(dev, arm_smmu_free_msis, dev);
+}
+
+static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
+{
+ int irq, ret;
+
+ arm_smmu_setup_msis(smmu);
+
+ /* Request interrupt lines */
+ irq = smmu->evtq.q.irq;
+ if (irq) {
+ ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
+ arm_smmu_evtq_thread,
+ IRQF_ONESHOT,
+ "arm-smmu-v3-evtq", smmu);
+ if (ret < 0)
+ dev_warn(smmu->dev, "failed to enable evtq irq\n");
+ } else {
+ dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
+ }
+
+ irq = smmu->gerr_irq;
+ if (irq) {
+ ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
+ 0, "arm-smmu-v3-gerror", smmu);
+ if (ret < 0)
+ dev_warn(smmu->dev, "failed to enable gerror irq\n");
+ } else {
+ dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
+ }
+
+ if (smmu->features & ARM_SMMU_FEAT_PRI) {
+ irq = smmu->priq.q.irq;
+ if (irq) {
+ ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
+ arm_smmu_priq_thread,
+ IRQF_ONESHOT,
+ "arm-smmu-v3-priq",
+ smmu);
+ if (ret < 0)
+ dev_warn(smmu->dev,
+ "failed to enable priq irq\n");
+ } else {
+ dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
+ }
+ }
+}
+
+static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
+{
+ int ret, irq;
+ u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
+
+ /* Disable IRQs first */
+ ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
+ ARM_SMMU_IRQ_CTRLACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to disable irqs\n");
+ return ret;
+ }
+
+ irq = smmu->combined_irq;
+ if (irq) {
+ /*
+ * Cavium ThunderX2 implementation doesn't support unique irq
+ * lines. Use a single irq line for all the SMMUv3 interrupts.
+ */
+ ret = devm_request_threaded_irq(smmu->dev, irq,
+ arm_smmu_combined_irq_handler,
+ arm_smmu_combined_irq_thread,
+ IRQF_ONESHOT,
+ "arm-smmu-v3-combined-irq", smmu);
+ if (ret < 0)
+ dev_warn(smmu->dev, "failed to enable combined irq\n");
+ } else {
+ arm_smmu_setup_unique_irqs(smmu);
+ }
+
+ if (smmu->features & ARM_SMMU_FEAT_PRI)
+ irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
+
+ /* Enable interrupt generation on the SMMU */
+ ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
+ ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
+ if (ret)
+ dev_warn(smmu->dev, "failed to enable irqs\n");
+
+ return 0;
+}
+
+static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
+ if (ret)
+ dev_err(smmu->dev, "failed to clear cr0\n");
+
+ return ret;
+}
+
+static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+{
+ int ret;
+ u32 reg, enables;
+ struct arm_smmu_cmdq_ent cmd;
+
+ /* Clear CR0 and sync (disables SMMU and queue processing) */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
+ if (reg & CR0_SMMUEN) {
+ dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+ WARN_ON(is_kdump_kernel() && !disable_bypass);
+ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+ }
+
+ ret = arm_smmu_device_disable(smmu);
+ if (ret)
+ return ret;
+
+ /* CR1 (table and queue memory attributes) */
+ reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
+ FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
+ FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
+ writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
+
+ /* CR2 (miscellaneous: PTM, invalid SID recording, E2H) */
+ reg = CR2_PTM | CR2_RECINVSID;
+
+ if (smmu->features & ARM_SMMU_FEAT_E2H)
+ reg |= CR2_E2H;
+
+ writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
+
+ /* Stream table */
+ writeq_relaxed(smmu->strtab_cfg.strtab_base,
+ smmu->base + ARM_SMMU_STRTAB_BASE);
+ writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
+ smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
+
+ /* Command queue */
+ writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
+ writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
+ writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
+
+ enables = CR0_CMDQEN;
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable command queue\n");
+ return ret;
+ }
+
+ /* Invalidate any cached configuration */
+ cmd.opcode = CMDQ_OP_CFGI_ALL;
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+
+ /* Invalidate any stale TLB entries */
+ if (smmu->features & ARM_SMMU_FEAT_HYP) {
+ cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+ }
+
+ cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
+ arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+
+ /* Event queue */
+ writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
+ writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
+ writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
+
+ enables |= CR0_EVTQEN;
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable event queue\n");
+ return ret;
+ }
+
+ /* PRI queue */
+ if (smmu->features & ARM_SMMU_FEAT_PRI) {
+ writeq_relaxed(smmu->priq.q.q_base,
+ smmu->base + ARM_SMMU_PRIQ_BASE);
+ writel_relaxed(smmu->priq.q.llq.prod,
+ smmu->page1 + ARM_SMMU_PRIQ_PROD);
+ writel_relaxed(smmu->priq.q.llq.cons,
+ smmu->page1 + ARM_SMMU_PRIQ_CONS);
+
+ enables |= CR0_PRIQEN;
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable PRI queue\n");
+ return ret;
+ }
+ }
+
+ if (smmu->features & ARM_SMMU_FEAT_ATS) {
+ enables |= CR0_ATSCHK;
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable ATS check\n");
+ return ret;
+ }
+ }
+
+ ret = arm_smmu_setup_irqs(smmu);
+ if (ret) {
+ dev_err(smmu->dev, "failed to setup irqs\n");
+ return ret;
+ }
+
+ if (is_kdump_kernel())
+ enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
+
+ /* Enable the SMMU interface, or ensure bypass */
+ if (!bypass || disable_bypass) {
+ enables |= CR0_SMMUEN;
+ } else {
+ ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
+ if (ret)
+ return ret;
+ }
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable SMMU interface\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#define IIDR_IMPLEMENTER_ARM 0x43b
+#define IIDR_PRODUCTID_ARM_MMU_600 0x483
+#define IIDR_PRODUCTID_ARM_MMU_700 0x487
+
+static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
+{
+ u32 reg;
+ unsigned int implementer, productid, variant, revision;
+
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
+ implementer = FIELD_GET(IIDR_IMPLEMENTER, reg);
+ productid = FIELD_GET(IIDR_PRODUCTID, reg);
+ variant = FIELD_GET(IIDR_VARIANT, reg);
+ revision = FIELD_GET(IIDR_REVISION, reg);
+
+ switch (implementer) {
+ case IIDR_IMPLEMENTER_ARM:
+ switch (productid) {
+ case IIDR_PRODUCTID_ARM_MMU_600:
+ /* Arm erratum 1076982 */
+ if (variant == 0 && revision <= 2)
+ smmu->features &= ~ARM_SMMU_FEAT_SEV;
+ /* Arm erratum 1209401 */
+ if (variant < 2)
+ smmu->features &= ~ARM_SMMU_FEAT_NESTING;
+ break;
+ case IIDR_PRODUCTID_ARM_MMU_700:
+ /* Arm erratum 2812531 */
+ smmu->features &= ~ARM_SMMU_FEAT_BTM;
+ smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
+ /* Arm errata 2268618, 2812531 */
+ smmu->features &= ~ARM_SMMU_FEAT_NESTING;
+ break;
+ }
+ break;
+ }
+}
+
+static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
+{
+ u32 reg;
+ bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
+
+ /* IDR0 */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
+
+ /* 2-level structures */
+ if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
+ smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
+
+ if (reg & IDR0_CD2L)
+ smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
+
+ /*
+ * Translation table endianness.
+ * We currently require the same endianness as the CPU, but this
+ * could be changed later by adding a new IO_PGTABLE_QUIRK.
+ */
+ switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
+ case IDR0_TTENDIAN_MIXED:
+ smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
+ break;
+#ifdef __BIG_ENDIAN
+ case IDR0_TTENDIAN_BE:
+ smmu->features |= ARM_SMMU_FEAT_TT_BE;
+ break;
+#else
+ case IDR0_TTENDIAN_LE:
+ smmu->features |= ARM_SMMU_FEAT_TT_LE;
+ break;
+#endif
+ default:
+ dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
+ return -ENXIO;
+ }
+
+ /* Boolean feature flags */
+ if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
+ smmu->features |= ARM_SMMU_FEAT_PRI;
+
+ if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
+ smmu->features |= ARM_SMMU_FEAT_ATS;
+
+ if (reg & IDR0_SEV)
+ smmu->features |= ARM_SMMU_FEAT_SEV;
+
+ if (reg & IDR0_MSI) {
+ smmu->features |= ARM_SMMU_FEAT_MSI;
+ if (coherent && !disable_msipolling)
+ smmu->options |= ARM_SMMU_OPT_MSIPOLL;
+ }
+
+ if (reg & IDR0_HYP) {
+ smmu->features |= ARM_SMMU_FEAT_HYP;
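+		/* Use the E2H translation regime if the host runs with VHE */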
+ if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ smmu->features |= ARM_SMMU_FEAT_E2H;
+ }
+
+ /*
+ * The coherency feature as set by FW is used in preference to the ID
+ * register, but warn on mismatch.
+ */
+ if (!!(reg & IDR0_COHACC) != coherent)
+ dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
+ coherent ? "true" : "false");
+
+ switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
+ case IDR0_STALL_MODEL_FORCE:
+ smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
+ fallthrough;
+ case IDR0_STALL_MODEL_STALL:
+ smmu->features |= ARM_SMMU_FEAT_STALLS;
+ }
+
+ if (reg & IDR0_S1P)
+ smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
+
+ if (reg & IDR0_S2P)
+ smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
+
+ if (!(reg & (IDR0_S1P | IDR0_S2P))) {
+ dev_err(smmu->dev, "no translation support!\n");
+ return -ENXIO;
+ }
+
+ /* We only support the AArch64 table format at present */
+ switch (FIELD_GET(IDR0_TTF, reg)) {
+ case IDR0_TTF_AARCH32_64:
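+		/* AArch32 tables limit the input address size to 40 bits */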
+ smmu->ias = 40;
+ fallthrough;
+ case IDR0_TTF_AARCH64:
+ break;
+ default:
+ dev_err(smmu->dev, "AArch64 table format not supported!\n");
+ return -ENXIO;
+ }
+
+ /* ASID/VMID sizes */
+ smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
+ smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
+
+ /* IDR1 */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
+ if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
+ dev_err(smmu->dev, "embedded implementation not supported\n");
+ return -ENXIO;
+ }
+
+ /* Queue sizes, capped to ensure natural alignment */
+ smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_CMDQS, reg));
+ if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
+ /*
+ * We don't support splitting up batches, so one batch of
+ * commands plus an extra sync needs to fit inside the command
+ * queue. There's also no way we can handle the weird alignment
+ * restrictions on the base pointer for a unit-length queue.
+ */
+ dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
+ CMDQ_BATCH_ENTRIES);
+ return -ENXIO;
+ }
+
+ smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_EVTQS, reg));
+ smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_PRIQS, reg));
+
+ /* SID/SSID sizes */
+ smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
+ smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
+ smmu->iommu.max_pasids = 1UL << smmu->ssid_bits;
+
+ /*
+ * If the SMMU supports fewer bits than would fill a single L2 stream
+ * table, use a linear table instead.
+ */
+ if (smmu->sid_bits <= STRTAB_SPLIT)
+ smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+
+ /* IDR3 */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
+ if (FIELD_GET(IDR3_RIL, reg))
+ smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+
+ /* IDR5 */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
+
+ /* Maximum number of outstanding stalls */
+ smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);
+
+ /* Page sizes */
+ if (reg & IDR5_GRAN64K)
+ smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
+ if (reg & IDR5_GRAN16K)
+ smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
+ if (reg & IDR5_GRAN4K)
+ smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
+
+ /* Input address size */
+ if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
+ smmu->features |= ARM_SMMU_FEAT_VAX;
+
+ /* Output address size */
+ switch (FIELD_GET(IDR5_OAS, reg)) {
+ case IDR5_OAS_32_BIT:
+ smmu->oas = 32;
+ break;
+ case IDR5_OAS_36_BIT:
+ smmu->oas = 36;
+ break;
+ case IDR5_OAS_40_BIT:
+ smmu->oas = 40;
+ break;
+ case IDR5_OAS_42_BIT:
+ smmu->oas = 42;
+ break;
+ case IDR5_OAS_44_BIT:
+ smmu->oas = 44;
+ break;
+ case IDR5_OAS_52_BIT:
+ smmu->oas = 52;
+ smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
+ break;
+ default:
+ dev_info(smmu->dev,
+ "unknown output address size. Truncating to 48-bit\n");
+ fallthrough;
+ case IDR5_OAS_48_BIT:
+ smmu->oas = 48;
+ }
+
+ if (arm_smmu_ops.pgsize_bitmap == -1UL)
+ arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+ else
+ arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
+
+ /* Set the DMA mask for our table walker */
+ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
+ dev_warn(smmu->dev,
+ "failed to set DMA mask for table walker\n");
+
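+	/* The stage 2 input (IPA) range must cover any output address */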
+ smmu->ias = max(smmu->ias, smmu->oas);
+
+ if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
+ (smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+ smmu->features |= ARM_SMMU_FEAT_NESTING;
+
+ arm_smmu_device_iidr_probe(smmu);
+
+ if (arm_smmu_sva_supported(smmu))
+ smmu->features |= ARM_SMMU_FEAT_SVA;
+
+ dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
+ smmu->ias, smmu->oas, smmu->features);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
+{
+ switch (model) {
+ case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
+ smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
+ break;
+ case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
+ smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
+ break;
+ }
+
+ dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
+}
+
+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ struct acpi_iort_smmu_v3 *iort_smmu;
+ struct device *dev = smmu->dev;
+ struct acpi_iort_node *node;
+
+ node = *(struct acpi_iort_node **)dev_get_platdata(dev);
+
+ /* Retrieve SMMUv3 specific data */
+ iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ acpi_smmu_get_options(iort_smmu->model, smmu);
+
+ if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+ return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ struct device *dev = &pdev->dev;
+ u32 cells;
+ int ret = -EINVAL;
+
+ if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
+ dev_err(dev, "missing #iommu-cells property\n");
+ else if (cells != 1)
+ dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
+ else
+ ret = 0;
+
+ parse_driver_options(smmu);
+
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+ return ret;
+}
+
+static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
+{
+ if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
+ return SZ_64K;
+ else
+ return SZ_128K;
+}
+
+static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
+ resource_size_t size)
+{
+ struct resource res = DEFINE_RES_MEM(start, size);
+
+ return devm_ioremap_resource(dev, &res);
+}
+
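+/*
+ * IORT RMR nodes describe regions with ongoing DMA set up by firmware;
+ * install bypass STEs for their stream IDs so that traffic keeps flowing
+ * until a domain is attached.
+ */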
+static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ __le64 *step;
+ struct iommu_iort_rmr_data *rmr;
+ int ret, i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
+ if (ret) {
+ dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
+ rmr->sids[i]);
+ continue;
+ }
+
+ step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
+ arm_smmu_init_bypass_stes(step, 1, true);
+ }
+ }
+
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+ int irq, ret;
+ struct resource *res;
+ resource_size_t ioaddr;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ bool bypass;
+
+ smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+ if (!smmu)
+ return -ENOMEM;
+ smmu->dev = dev;
+
+ if (dev->of_node) {
+ ret = arm_smmu_device_dt_probe(pdev, smmu);
+ } else {
+ ret = arm_smmu_device_acpi_probe(pdev, smmu);
+ if (ret == -ENODEV)
+ return ret;
+ }
+
+ /* Set bypass mode according to firmware probing result */
+ bypass = !!ret;
+
+ /* Base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ if (resource_size(res) < arm_smmu_resource_size(smmu)) {
+ dev_err(dev, "MMIO region too small (%pr)\n", res);
+ return -EINVAL;
+ }
+ ioaddr = res->start;
+
+ /*
+ * Don't map the IMPLEMENTATION DEFINED regions, since they may contain
+ * the PMCG registers which are reserved by the PMU driver.
+ */
+ smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
+ if (IS_ERR(smmu->base))
+ return PTR_ERR(smmu->base);
+
+ if (arm_smmu_resource_size(smmu) > SZ_64K) {
+ smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
+ ARM_SMMU_REG_SZ);
+ if (IS_ERR(smmu->page1))
+ return PTR_ERR(smmu->page1);
+ } else {
+ smmu->page1 = smmu->base;
+ }
+
+	/* Interrupt lines */
+	irq = platform_get_irq_byname_optional(pdev, "combined");
+	if (irq > 0) {
+		smmu->combined_irq = irq;
+	} else {
+ irq = platform_get_irq_byname_optional(pdev, "eventq");
+ if (irq > 0)
+ smmu->evtq.q.irq = irq;
+
+ irq = platform_get_irq_byname_optional(pdev, "priq");
+ if (irq > 0)
+ smmu->priq.q.irq = irq;
+
+ irq = platform_get_irq_byname_optional(pdev, "gerror");
+ if (irq > 0)
+ smmu->gerr_irq = irq;
+ }
+
+	/* Probe the h/w */
+ ret = arm_smmu_device_hw_probe(smmu);
+ if (ret)
+ return ret;
+
+ /* Initialise in-memory data structures */
+ ret = arm_smmu_init_structures(smmu);
+ if (ret)
+ return ret;
+
+ /* Record our private device structure */
+ platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass STEs if any */
+ arm_smmu_rmr_install_bypass_ste(smmu);
+
+ /* Reset the device */
+ ret = arm_smmu_device_reset(smmu, bypass);
+ if (ret)
+ return ret;
+
+ /* And we're up. Go go go! */
+ ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+ "smmu3.%pa", &ioaddr);
+ if (ret)
+ return ret;
+
+ ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+ if (ret) {
+ dev_err(dev, "Failed to register iommu\n");
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void arm_smmu_device_remove(struct platform_device *pdev)
+{
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+ iommu_device_unregister(&smmu->iommu);
+ iommu_device_sysfs_remove(&smmu->iommu);
+ arm_smmu_device_disable(smmu);
+ iopf_queue_free(smmu->evtq.iopf);
+ ida_destroy(&smmu->vmid_map);
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+ arm_smmu_device_disable(smmu);
+}
+
+static const struct of_device_id arm_smmu_of_match[] = {
+ { .compatible = "arm,smmu-v3", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+
+static void arm_smmu_driver_unregister(struct platform_driver *drv)
+{
+ arm_smmu_sva_notifier_synchronize();
+ platform_driver_unregister(drv);
+}
+
+static struct platform_driver arm_smmu_driver = {
+ .driver = {
+ .name = "arm-smmu-v3",
+ .of_match_table = arm_smmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = arm_smmu_device_probe,
+ .remove_new = arm_smmu_device_remove,
+ .shutdown = arm_smmu_device_shutdown,
+};
+module_driver(arm_smmu_driver, platform_driver_register,
+ arm_smmu_driver_unregister);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu-v3");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
new file mode 100644
index 0000000000..9915850dd4
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -0,0 +1,813 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IOMMU API for ARM architected SMMUv3 implementations.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef _ARM_SMMU_V3_H
+#define _ARM_SMMU_V3_H
+
+#include <linux/bitfield.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/sizes.h>
+
+/* MMIO registers */
+#define ARM_SMMU_IDR0 0x0
+#define IDR0_ST_LVL GENMASK(28, 27)
+#define IDR0_ST_LVL_2LVL 1
+#define IDR0_STALL_MODEL GENMASK(25, 24)
+#define IDR0_STALL_MODEL_STALL 0
+#define IDR0_STALL_MODEL_FORCE 2
+#define IDR0_TTENDIAN GENMASK(22, 21)
+#define IDR0_TTENDIAN_MIXED 0
+#define IDR0_TTENDIAN_LE 2
+#define IDR0_TTENDIAN_BE 3
+#define IDR0_CD2L (1 << 19)
+#define IDR0_VMID16 (1 << 18)
+#define IDR0_PRI (1 << 16)
+#define IDR0_SEV (1 << 14)
+#define IDR0_MSI (1 << 13)
+#define IDR0_ASID16 (1 << 12)
+#define IDR0_ATS (1 << 10)
+#define IDR0_HYP (1 << 9)
+#define IDR0_COHACC (1 << 4)
+#define IDR0_TTF GENMASK(3, 2)
+#define IDR0_TTF_AARCH64 2
+#define IDR0_TTF_AARCH32_64 3
+#define IDR0_S1P (1 << 1)
+#define IDR0_S2P (1 << 0)
+
+#define ARM_SMMU_IDR1 0x4
+#define IDR1_TABLES_PRESET (1 << 30)
+#define IDR1_QUEUES_PRESET (1 << 29)
+#define IDR1_REL (1 << 28)
+#define IDR1_CMDQS GENMASK(25, 21)
+#define IDR1_EVTQS GENMASK(20, 16)
+#define IDR1_PRIQS GENMASK(15, 11)
+#define IDR1_SSIDSIZE GENMASK(10, 6)
+#define IDR1_SIDSIZE GENMASK(5, 0)
+
+#define ARM_SMMU_IDR3 0xc
+#define IDR3_RIL (1 << 10)
+
+#define ARM_SMMU_IDR5 0x14
+#define IDR5_STALL_MAX GENMASK(31, 16)
+#define IDR5_GRAN64K (1 << 6)
+#define IDR5_GRAN16K (1 << 5)
+#define IDR5_GRAN4K (1 << 4)
+#define IDR5_OAS GENMASK(2, 0)
+#define IDR5_OAS_32_BIT 0
+#define IDR5_OAS_36_BIT 1
+#define IDR5_OAS_40_BIT 2
+#define IDR5_OAS_42_BIT 3
+#define IDR5_OAS_44_BIT 4
+#define IDR5_OAS_48_BIT 5
+#define IDR5_OAS_52_BIT 6
+#define IDR5_VAX GENMASK(11, 10)
+#define IDR5_VAX_52_BIT 1
+
+#define ARM_SMMU_IIDR 0x18
+#define IIDR_PRODUCTID GENMASK(31, 20)
+#define IIDR_VARIANT GENMASK(19, 16)
+#define IIDR_REVISION GENMASK(15, 12)
+#define IIDR_IMPLEMENTER GENMASK(11, 0)
+
+#define ARM_SMMU_CR0 0x20
+#define CR0_ATSCHK (1 << 4)
+#define CR0_CMDQEN (1 << 3)
+#define CR0_EVTQEN (1 << 2)
+#define CR0_PRIQEN (1 << 1)
+#define CR0_SMMUEN (1 << 0)
+
+#define ARM_SMMU_CR0ACK 0x24
+
+#define ARM_SMMU_CR1 0x28
+#define CR1_TABLE_SH GENMASK(11, 10)
+#define CR1_TABLE_OC GENMASK(9, 8)
+#define CR1_TABLE_IC GENMASK(7, 6)
+#define CR1_QUEUE_SH GENMASK(5, 4)
+#define CR1_QUEUE_OC GENMASK(3, 2)
+#define CR1_QUEUE_IC GENMASK(1, 0)
+/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
+#define CR1_CACHE_NC 0
+#define CR1_CACHE_WB 1
+#define CR1_CACHE_WT 2
+
+#define ARM_SMMU_CR2 0x2c
+#define CR2_PTM (1 << 2)
+#define CR2_RECINVSID (1 << 1)
+#define CR2_E2H (1 << 0)
+
+#define ARM_SMMU_GBPA 0x44
+#define GBPA_UPDATE (1 << 31)
+#define GBPA_ABORT (1 << 20)
+
+#define ARM_SMMU_IRQ_CTRL 0x50
+#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
+#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
+#define IRQ_CTRL_GERROR_IRQEN (1 << 0)
+
+#define ARM_SMMU_IRQ_CTRLACK 0x54
+
+#define ARM_SMMU_GERROR 0x60
+#define GERROR_SFM_ERR (1 << 8)
+#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
+#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
+#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
+#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
+#define GERROR_PRIQ_ABT_ERR (1 << 3)
+#define GERROR_EVTQ_ABT_ERR (1 << 2)
+#define GERROR_CMDQ_ERR (1 << 0)
+#define GERROR_ERR_MASK 0x1fd
+
+#define ARM_SMMU_GERRORN 0x64
+
+#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
+#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
+#define ARM_SMMU_GERROR_IRQ_CFG2 0x74
+
+#define ARM_SMMU_STRTAB_BASE 0x80
+#define STRTAB_BASE_RA (1UL << 62)
+#define STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
+
+#define ARM_SMMU_STRTAB_BASE_CFG 0x88
+#define STRTAB_BASE_CFG_FMT GENMASK(17, 16)
+#define STRTAB_BASE_CFG_FMT_LINEAR 0
+#define STRTAB_BASE_CFG_FMT_2LVL 1
+#define STRTAB_BASE_CFG_SPLIT GENMASK(10, 6)
+#define STRTAB_BASE_CFG_LOG2SIZE GENMASK(5, 0)
+
+#define ARM_SMMU_CMDQ_BASE 0x90
+#define ARM_SMMU_CMDQ_PROD 0x98
+#define ARM_SMMU_CMDQ_CONS 0x9c
+
+#define ARM_SMMU_EVTQ_BASE 0xa0
+#define ARM_SMMU_EVTQ_PROD 0xa8
+#define ARM_SMMU_EVTQ_CONS 0xac
+#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
+#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
+#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
+
+#define ARM_SMMU_PRIQ_BASE 0xc0
+#define ARM_SMMU_PRIQ_PROD 0xc8
+#define ARM_SMMU_PRIQ_CONS 0xcc
+#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
+#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
+#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
+
+#define ARM_SMMU_REG_SZ 0xe00
+
+/* Common MSI config fields */
+#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
+#define MSI_CFG2_SH GENMASK(5, 4)
+#define MSI_CFG2_MEMATTR GENMASK(3, 0)
+
+/* Common memory attribute values */
+#define ARM_SMMU_SH_NSH 0
+#define ARM_SMMU_SH_OSH 2
+#define ARM_SMMU_SH_ISH 3
+#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
+#define ARM_SMMU_MEMATTR_OIWB 0xf
+
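+/* Queue pointers encode an index, a wrap bit and a software overflow flag */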
+#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1))
+#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift))
+#define Q_OVERFLOW_FLAG (1U << 31)
+#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG)
+#define Q_ENT(q, p) ((q)->base + \
+ Q_IDX(&((q)->llq), p) * \
+ (q)->ent_dwords)
+
+#define Q_BASE_RWA (1UL << 62)
+#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
+#define Q_BASE_LOG2SIZE GENMASK(4, 0)
+
+/* Ensure DMA allocations are naturally aligned */
+#ifdef CONFIG_CMA_ALIGNMENT
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
+#else
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER)
+#endif
+
+/*
+ * Stream table.
+ *
+ * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
+ * 2lvl: 128k L1 entries,
+ * 256 lazy entries per table (each table covers a PCI bus)
+ */
+#define STRTAB_L1_SZ_SHIFT 20
+#define STRTAB_SPLIT 8
+
+#define STRTAB_L1_DESC_DWORDS 1
+#define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0)
+#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
+
+#define STRTAB_STE_DWORDS 8
+#define STRTAB_STE_0_V (1UL << 0)
+#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
+#define STRTAB_STE_0_CFG_ABORT 0
+#define STRTAB_STE_0_CFG_BYPASS 4
+#define STRTAB_STE_0_CFG_S1_TRANS 5
+#define STRTAB_STE_0_CFG_S2_TRANS 6
+
+#define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4)
+#define STRTAB_STE_0_S1FMT_LINEAR 0
+#define STRTAB_STE_0_S1FMT_64K_L2 2
+#define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
+#define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59)
+
+#define STRTAB_STE_1_S1DSS GENMASK_ULL(1, 0)
+#define STRTAB_STE_1_S1DSS_TERMINATE 0x0
+#define STRTAB_STE_1_S1DSS_BYPASS 0x1
+#define STRTAB_STE_1_S1DSS_SSID0 0x2
+
+#define STRTAB_STE_1_S1C_CACHE_NC 0UL
+#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
+#define STRTAB_STE_1_S1C_CACHE_WT 2UL
+#define STRTAB_STE_1_S1C_CACHE_WB 3UL
+#define STRTAB_STE_1_S1CIR GENMASK_ULL(3, 2)
+#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
+#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
+
+#define STRTAB_STE_1_S1STALLD (1UL << 27)
+
+#define STRTAB_STE_1_EATS GENMASK_ULL(29, 28)
+#define STRTAB_STE_1_EATS_ABT 0UL
+#define STRTAB_STE_1_EATS_TRANS 1UL
+#define STRTAB_STE_1_EATS_S1CHK 2UL
+
+#define STRTAB_STE_1_STRW GENMASK_ULL(31, 30)
+#define STRTAB_STE_1_STRW_NSEL1 0UL
+#define STRTAB_STE_1_STRW_EL2 2UL
+
+#define STRTAB_STE_1_SHCFG GENMASK_ULL(45, 44)
+#define STRTAB_STE_1_SHCFG_INCOMING 1UL
+
+#define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0)
+#define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32)
+#define STRTAB_STE_2_VTCR_S2T0SZ GENMASK_ULL(5, 0)
+#define STRTAB_STE_2_VTCR_S2SL0 GENMASK_ULL(7, 6)
+#define STRTAB_STE_2_VTCR_S2IR0 GENMASK_ULL(9, 8)
+#define STRTAB_STE_2_VTCR_S2OR0 GENMASK_ULL(11, 10)
+#define STRTAB_STE_2_VTCR_S2SH0 GENMASK_ULL(13, 12)
+#define STRTAB_STE_2_VTCR_S2TG GENMASK_ULL(15, 14)
+#define STRTAB_STE_2_VTCR_S2PS GENMASK_ULL(18, 16)
+#define STRTAB_STE_2_S2AA64 (1UL << 51)
+#define STRTAB_STE_2_S2ENDI (1UL << 52)
+#define STRTAB_STE_2_S2PTW (1UL << 54)
+#define STRTAB_STE_2_S2R (1UL << 58)
+
+#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)
+
+/*
+ * Context descriptors.
+ *
+ * Linear: when less than 1024 SSIDs are supported
+ * 2lvl: at most 1024 L1 entries,
+ * 1024 lazy entries per table.
+ */
+#define CTXDESC_SPLIT 10
+#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT)
+
+#define CTXDESC_L1_DESC_DWORDS 1
+#define CTXDESC_L1_DESC_V (1UL << 0)
+#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
+
+#define CTXDESC_CD_DWORDS 8
+#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
+#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
+#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
+#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
+#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
+#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14)
+#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30)
+
+#define CTXDESC_CD_0_ENDI (1UL << 15)
+#define CTXDESC_CD_0_V (1UL << 31)
+
+#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
+#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
+
+#define CTXDESC_CD_0_AA64 (1UL << 41)
+#define CTXDESC_CD_0_S (1UL << 44)
+#define CTXDESC_CD_0_R (1UL << 45)
+#define CTXDESC_CD_0_A (1UL << 46)
+#define CTXDESC_CD_0_ASET (1UL << 47)
+#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48)
+
+#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
+
+/*
+ * When the SMMU only supports linear context descriptor tables, pick a
+ * reasonable size limit (64kB).
+ */
+#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
+
+/* Command queue */
+#define CMDQ_ENT_SZ_SHIFT 4
+#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
+#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
+
+#define CMDQ_CONS_ERR GENMASK(30, 24)
+#define CMDQ_ERR_CERROR_NONE_IDX 0
+#define CMDQ_ERR_CERROR_ILL_IDX 1
+#define CMDQ_ERR_CERROR_ABT_IDX 2
+#define CMDQ_ERR_CERROR_ATC_INV_IDX 3
+
+#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG
+
+/*
+ * This is used to size the command queue and therefore must be at least
+ * BITS_PER_LONG so that the valid_map works correctly (it relies on the
+ * total number of queue entries being a multiple of BITS_PER_LONG).
+ */
+#define CMDQ_BATCH_ENTRIES BITS_PER_LONG
+
+#define CMDQ_0_OP GENMASK_ULL(7, 0)
+#define CMDQ_0_SSV (1UL << 11)
+
+#define CMDQ_PREFETCH_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
+#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)
+
+#define CMDQ_CFGI_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_CFGI_1_LEAF (1UL << 0)
+#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
+
+#define CMDQ_TLBI_0_NUM GENMASK_ULL(16, 12)
+#define CMDQ_TLBI_RANGE_NUM_MAX 31
+#define CMDQ_TLBI_0_SCALE GENMASK_ULL(24, 20)
+#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
+#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
+#define CMDQ_TLBI_1_LEAF (1UL << 0)
+#define CMDQ_TLBI_1_TTL GENMASK_ULL(9, 8)
+#define CMDQ_TLBI_1_TG GENMASK_ULL(11, 10)
+#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
+#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
+
+#define CMDQ_ATC_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_ATC_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_ATC_0_GLOBAL (1UL << 9)
+#define CMDQ_ATC_1_SIZE GENMASK_ULL(5, 0)
+#define CMDQ_ATC_1_ADDR_MASK GENMASK_ULL(63, 12)
+
+#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
+#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
+
+#define CMDQ_RESUME_0_RESP_TERM 0UL
+#define CMDQ_RESUME_0_RESP_RETRY 1UL
+#define CMDQ_RESUME_0_RESP_ABORT 2UL
+#define CMDQ_RESUME_0_RESP GENMASK_ULL(13, 12)
+#define CMDQ_RESUME_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_RESUME_1_STAG GENMASK_ULL(15, 0)
+
+#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
+#define CMDQ_SYNC_0_CS_NONE 0
+#define CMDQ_SYNC_0_CS_IRQ 1
+#define CMDQ_SYNC_0_CS_SEV 2
+#define CMDQ_SYNC_0_MSH GENMASK_ULL(23, 22)
+#define CMDQ_SYNC_0_MSIATTR GENMASK_ULL(27, 24)
+#define CMDQ_SYNC_0_MSIDATA GENMASK_ULL(63, 32)
+#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
+
+/* Event queue */
+#define EVTQ_ENT_SZ_SHIFT 5
+#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
+#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
+
+#define EVTQ_0_ID GENMASK_ULL(7, 0)
+
+#define EVT_ID_TRANSLATION_FAULT 0x10
+#define EVT_ID_ADDR_SIZE_FAULT 0x11
+#define EVT_ID_ACCESS_FAULT 0x12
+#define EVT_ID_PERMISSION_FAULT 0x13
+
+#define EVTQ_0_SSV (1UL << 11)
+#define EVTQ_0_SSID GENMASK_ULL(31, 12)
+#define EVTQ_0_SID GENMASK_ULL(63, 32)
+#define EVTQ_1_STAG GENMASK_ULL(15, 0)
+#define EVTQ_1_STALL (1UL << 31)
+#define EVTQ_1_PnU (1UL << 33)
+#define EVTQ_1_InD (1UL << 34)
+#define EVTQ_1_RnW (1UL << 35)
+#define EVTQ_1_S2 (1UL << 39)
+#define EVTQ_1_CLASS GENMASK_ULL(41, 40)
+#define EVTQ_1_TT_READ (1UL << 44)
+#define EVTQ_2_ADDR GENMASK_ULL(63, 0)
+#define EVTQ_3_IPA GENMASK_ULL(51, 12)
+
+/* PRI queue */
+#define PRIQ_ENT_SZ_SHIFT 4
+#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
+#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
+
+#define PRIQ_0_SID GENMASK_ULL(31, 0)
+#define PRIQ_0_SSID GENMASK_ULL(51, 32)
+#define PRIQ_0_PERM_PRIV (1UL << 58)
+#define PRIQ_0_PERM_EXEC (1UL << 59)
+#define PRIQ_0_PERM_READ (1UL << 60)
+#define PRIQ_0_PERM_WRITE (1UL << 61)
+#define PRIQ_0_PRG_LAST (1UL << 62)
+#define PRIQ_0_SSID_V (1UL << 63)
+
+#define PRIQ_1_PRG_IDX GENMASK_ULL(8, 0)
+#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)
+
+/* High-level queue structures */
+#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */
+#define ARM_SMMU_POLL_SPIN_COUNT 10
+
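+/* Software-chosen IOVA window in which MSI doorbells are mapped */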
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
+enum pri_resp {
+ PRI_RESP_DENY = 0,
+ PRI_RESP_FAIL = 1,
+ PRI_RESP_SUCC = 2,
+};
+
+struct arm_smmu_cmdq_ent {
+ /* Common fields */
+ u8 opcode;
+ bool substream_valid;
+
+ /* Command-specific fields */
+ union {
+ #define CMDQ_OP_PREFETCH_CFG 0x1
+ struct {
+ u32 sid;
+ } prefetch;
+
+ #define CMDQ_OP_CFGI_STE 0x3
+ #define CMDQ_OP_CFGI_ALL 0x4
+ #define CMDQ_OP_CFGI_CD 0x5
+ #define CMDQ_OP_CFGI_CD_ALL 0x6
+ struct {
+ u32 sid;
+ u32 ssid;
+ union {
+ bool leaf;
+ u8 span;
+ };
+ } cfgi;
+
+ #define CMDQ_OP_TLBI_NH_ASID 0x11
+ #define CMDQ_OP_TLBI_NH_VA 0x12
+ #define CMDQ_OP_TLBI_EL2_ALL 0x20
+ #define CMDQ_OP_TLBI_EL2_ASID 0x21
+ #define CMDQ_OP_TLBI_EL2_VA 0x22
+ #define CMDQ_OP_TLBI_S12_VMALL 0x28
+ #define CMDQ_OP_TLBI_S2_IPA 0x2a
+ #define CMDQ_OP_TLBI_NSNH_ALL 0x30
+ struct {
+ u8 num;
+ u8 scale;
+ u16 asid;
+ u16 vmid;
+ bool leaf;
+ u8 ttl;
+ u8 tg;
+ u64 addr;
+ } tlbi;
+
+ #define CMDQ_OP_ATC_INV 0x40
+ #define ATC_INV_SIZE_ALL 52
+ struct {
+ u32 sid;
+ u32 ssid;
+ u64 addr;
+ u8 size;
+ bool global;
+ } atc;
+
+ #define CMDQ_OP_PRI_RESP 0x41
+ struct {
+ u32 sid;
+ u32 ssid;
+ u16 grpid;
+ enum pri_resp resp;
+ } pri;
+
+ #define CMDQ_OP_RESUME 0x44
+ struct {
+ u32 sid;
+ u16 stag;
+ u8 resp;
+ } resume;
+
+ #define CMDQ_OP_CMD_SYNC 0x46
+ struct {
+ u64 msiaddr;
+ } sync;
+ };
+};
+
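+/*
+ * Low-level queue state: prod and cons share a single 64-bit word so
+ * that both can be snapshotted (or updated) atomically.
+ */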
+struct arm_smmu_ll_queue {
+ union {
+ u64 val;
+ struct {
+ u32 prod;
+ u32 cons;
+ };
+ struct {
+ atomic_t prod;
+ atomic_t cons;
+ } atomic;
+ u8 __pad[SMP_CACHE_BYTES];
+ } ____cacheline_aligned_in_smp;
+ u32 max_n_shift;
+};
+
+struct arm_smmu_queue {
+ struct arm_smmu_ll_queue llq;
+ int irq; /* Wired interrupt */
+
+ __le64 *base;
+ dma_addr_t base_dma;
+ u64 q_base;
+
+ size_t ent_dwords;
+
+ u32 __iomem *prod_reg;
+ u32 __iomem *cons_reg;
+};
+
+struct arm_smmu_queue_poll {
+ ktime_t timeout;
+ unsigned int delay;
+ unsigned int spin_cnt;
+ bool wfe;
+};
+
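+/*
+ * Lock-free command queue: multiple producers claim slots concurrently,
+ * valid_map tracks which claimed entries have been fully written, and
+ * the owner arbitration fields decide who publishes prod to the hardware.
+ */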
+struct arm_smmu_cmdq {
+ struct arm_smmu_queue q;
+ atomic_long_t *valid_map;
+ atomic_t owner_prod;
+ atomic_t lock;
+};
+
+struct arm_smmu_cmdq_batch {
+ u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+ int num;
+};
+
+struct arm_smmu_evtq {
+ struct arm_smmu_queue q;
+ struct iopf_queue *iopf;
+ u32 max_stalls;
+};
+
+struct arm_smmu_priq {
+ struct arm_smmu_queue q;
+};
+
+/* High-level stream table and context descriptor structures */
+struct arm_smmu_strtab_l1_desc {
+ u8 span;
+
+ __le64 *l2ptr;
+ dma_addr_t l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc {
+ u16 asid;
+ u64 ttbr;
+ u64 tcr;
+ u64 mair;
+
+ refcount_t refs;
+ struct mm_struct *mm;
+};
+
+struct arm_smmu_l1_ctx_desc {
+ __le64 *l2ptr;
+ dma_addr_t l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc_cfg {
+ __le64 *cdtab;
+ dma_addr_t cdtab_dma;
+ struct arm_smmu_l1_ctx_desc *l1_desc;
+ unsigned int num_l1_ents;
+};
+
+struct arm_smmu_s1_cfg {
+ struct arm_smmu_ctx_desc_cfg cdcfg;
+ struct arm_smmu_ctx_desc cd;
+ u8 s1fmt;
+ u8 s1cdmax;
+};
+
+struct arm_smmu_s2_cfg {
+ u16 vmid;
+ u64 vttbr;
+ u64 vtcr;
+};
+
+struct arm_smmu_strtab_cfg {
+ __le64 *strtab;
+ dma_addr_t strtab_dma;
+ struct arm_smmu_strtab_l1_desc *l1_desc;
+ unsigned int num_l1_ents;
+
+ u64 strtab_base;
+ u32 strtab_base_cfg;
+};
+
+/* An SMMUv3 instance */
+struct arm_smmu_device {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *page1;
+
+#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
+#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
+#define ARM_SMMU_FEAT_TT_LE (1 << 2)
+#define ARM_SMMU_FEAT_TT_BE (1 << 3)
+#define ARM_SMMU_FEAT_PRI (1 << 4)
+#define ARM_SMMU_FEAT_ATS (1 << 5)
+#define ARM_SMMU_FEAT_SEV (1 << 6)
+#define ARM_SMMU_FEAT_MSI (1 << 7)
+#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
+#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
+#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
+#define ARM_SMMU_FEAT_STALLS (1 << 11)
+#define ARM_SMMU_FEAT_HYP (1 << 12)
+#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
+#define ARM_SMMU_FEAT_VAX (1 << 14)
+#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
+#define ARM_SMMU_FEAT_BTM (1 << 16)
+#define ARM_SMMU_FEAT_SVA (1 << 17)
+#define ARM_SMMU_FEAT_E2H (1 << 18)
+#define ARM_SMMU_FEAT_NESTING (1 << 19)
+ u32 features;
+
+#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
+#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
+#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
+ u32 options;
+
+ struct arm_smmu_cmdq cmdq;
+ struct arm_smmu_evtq evtq;
+ struct arm_smmu_priq priq;
+
+ int gerr_irq;
+ int combined_irq;
+
+ unsigned long ias; /* IPA */
+ unsigned long oas; /* PA */
+ unsigned long pgsize_bitmap;
+
+#define ARM_SMMU_MAX_ASIDS (1 << 16)
+ unsigned int asid_bits;
+
+#define ARM_SMMU_MAX_VMIDS (1 << 16)
+ unsigned int vmid_bits;
+ struct ida vmid_map;
+
+ unsigned int ssid_bits;
+ unsigned int sid_bits;
+
+ struct arm_smmu_strtab_cfg strtab_cfg;
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
+
+ struct rb_root streams;
+ struct mutex streams_mutex;
+};
+
+struct arm_smmu_stream {
+ u32 id;
+ struct arm_smmu_master *master;
+ struct rb_node node;
+};
+
+/* SMMU private data for each master */
+struct arm_smmu_master {
+ struct arm_smmu_device *smmu;
+ struct device *dev;
+ struct arm_smmu_domain *domain;
+ struct list_head domain_head;
+ struct arm_smmu_stream *streams;
+ unsigned int num_streams;
+ bool ats_enabled;
+ bool stall_enabled;
+ bool sva_enabled;
+ bool iopf_enabled;
+ struct list_head bonds;
+ unsigned int ssid_bits;
+};
+
+/* SMMU private data for an IOMMU domain */
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
+};
+
+struct arm_smmu_domain {
+ struct arm_smmu_device *smmu;
+ struct mutex init_mutex; /* Protects smmu pointer */
+
+ struct io_pgtable_ops *pgtbl_ops;
+ bool stall_enabled;
+ atomic_t nr_ats_masters;
+
+ enum arm_smmu_domain_stage stage;
+ union {
+ struct arm_smmu_s1_cfg s1_cfg;
+ struct arm_smmu_s2_cfg s2_cfg;
+ };
+
+ struct iommu_domain domain;
+
+ struct list_head devices;
+ spinlock_t devices_lock;
+
+ struct list_head mmu_notifiers;
+};
+
+static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct arm_smmu_domain, domain);
+}
+
+extern struct xarray arm_smmu_asid_xa;
+extern struct mutex arm_smmu_asid_lock;
+extern struct arm_smmu_ctx_desc quiet_cd;
+
+int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+ struct arm_smmu_ctx_desc *cd);
+void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
+void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain);
+bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
+int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
+ unsigned long iova, size_t size);
+
+#ifdef CONFIG_ARM_SMMU_V3_SVA
+bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
+bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
+void arm_smmu_sva_notifier_synchronize(void);
+struct iommu_domain *arm_smmu_sva_domain_alloc(void);
+void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t id);
+#else /* CONFIG_ARM_SMMU_V3_SVA */
+static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
+{
+ return false;
+}
+
+static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+ return false;
+}
+
+static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+ return false;
+}
+
+static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+ return -ENODEV;
+}
+
+static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+ return -ENODEV;
+}
+
+static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
+{
+ return false;
+}
+
+static inline void arm_smmu_sva_notifier_synchronize(void) {}
+
+static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void)
+{
+ return NULL;
+}
+
+static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+ struct device *dev,
+ ioasid_t id)
+{
+}
+#endif /* CONFIG_ARM_SMMU_V3_SVA */
+#endif /* _ARM_SMMU_V3_H */
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
new file mode 100644
index 0000000000..2a5a95e8e3
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM_DEBUG) += arm-smmu-qcom-debug.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
new file mode 100644
index 0000000000..9dc772f2cb
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Miscellaneous Arm SMMU implementation and integration quirks
+// Copyright (C) 2019 Arm Limited
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/of.h>
+
+#include "arm-smmu.h"
+
+
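+/*
+ * With the Calxeda secure-config-access quirk, the non-secure aliases of
+ * these GR0 registers live 0x400 bytes above their secure counterparts.
+ */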
+static int arm_smmu_gr0_ns(int offset)
+{
+ switch (offset) {
+ case ARM_SMMU_GR0_sCR0:
+ case ARM_SMMU_GR0_sACR:
+ case ARM_SMMU_GR0_sGFSR:
+ case ARM_SMMU_GR0_sGFSYNR0:
+ case ARM_SMMU_GR0_sGFSYNR1:
+ case ARM_SMMU_GR0_sGFSYNR2:
+ return offset + 0x400;
+ default:
+ return offset;
+ }
+}
+
+static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
+ int offset)
+{
+ if (page == ARM_SMMU_GR0)
+ offset = arm_smmu_gr0_ns(offset);
+ return readl_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
+ int offset, u32 val)
+{
+ if (page == ARM_SMMU_GR0)
+ offset = arm_smmu_gr0_ns(offset);
+ writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+/* Since we don't care for sGFAR, we can do without 64-bit accessors */
+static const struct arm_smmu_impl calxeda_impl = {
+ .read_reg = arm_smmu_read_ns,
+ .write_reg = arm_smmu_write_ns,
+};
+
+
+struct cavium_smmu {
+ struct arm_smmu_device smmu;
+ u32 id_base;
+};
+
+static int cavium_cfg_probe(struct arm_smmu_device *smmu)
+{
+ static atomic_t context_count = ATOMIC_INIT(0);
+ struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
+ /*
+ * Cavium CN88xx erratum #27704.
+ * Ensure ASID and VMID allocation is unique across all SMMUs in
+ * the system.
+ */
+ cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
+ dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
+
+ return 0;
+}
+
+static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+{
+ struct cavium_smmu *cs = container_of(smmu_domain->smmu,
+ struct cavium_smmu, smmu);
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
+ smmu_domain->cfg.vmid += cs->id_base;
+ else
+ smmu_domain->cfg.asid += cs->id_base;
+
+ return 0;
+}
+
+static const struct arm_smmu_impl cavium_impl = {
+ .cfg_probe = cavium_cfg_probe,
+ .init_context = cavium_init_context,
+};
+
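+/* Grow the generic SMMU structure in place to make room for private data */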
+static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct cavium_smmu *cs;
+
+ cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ cs->smmu.impl = &cavium_impl;
+
+ return &cs->smmu;
+}
+
+
+#define ARM_MMU500_ACTLR_CPRE (1 << 1)
+
+#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
+
+int arm_mmu500_reset(struct arm_smmu_device *smmu)
+{
+ u32 reg, major;
+ int i;
+ /*
+ * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
+ * writes to the context bank ACTLRs will stick. And we just hope that
+ * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
+ */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
+ major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
+ if (major >= 2)
+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ /*
+ * Allow unmatched Stream IDs to allocate bypass
+ * TLB entries for reduced latency.
+ */
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
+
+ /*
+ * Disable MMU-500's not-particularly-beneficial next-page
+ * prefetcher for the sake of errata #841119 and #826419.
+ */
+ for (i = 0; i < smmu->num_context_banks; ++i) {
+ reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
+ reg &= ~ARM_MMU500_ACTLR_CPRE;
+ arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
+ reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
+ if (reg & ARM_MMU500_ACTLR_CPRE)
+ dev_warn_once(smmu->dev, "Failed to disable prefetcher [errata #841119 and #826419], check ACR.CACHE_LOCK\n");
+ }
+
+ return 0;
+}
+
+static const struct arm_smmu_impl arm_mmu500_impl = {
+ .reset = arm_mmu500_reset,
+};
+
+static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
+{
+ /*
+ * Marvell Armada-AP806 erratum #582743.
+	 * Split each readq into two readl accesses.
+ */
+ return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
+}
+
+static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
+ u64 val)
+{
+ /*
+ * Marvell Armada-AP806 erratum #582743.
+	 * Split each writeq into two writel accesses.
+ */
+ hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
+}
+
+static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
+{
+ /*
+ * Armada-AP806 erratum #582743.
+ * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
+	 * formats altogether and allow using 32-bit accesses on the
+ * interconnect.
+ */
+ smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
+ ARM_SMMU_FEAT_FMT_AARCH64_16K |
+ ARM_SMMU_FEAT_FMT_AARCH64_64K);
+
+ return 0;
+}
+
+static const struct arm_smmu_impl mrvl_mmu500_impl = {
+ .read_reg64 = mrvl_mmu500_readq,
+ .write_reg64 = mrvl_mmu500_writeq,
+ .cfg_probe = mrvl_mmu500_cfg_probe,
+ .reset = arm_mmu500_reset,
+};
+
+
+struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ const struct device_node *np = smmu->dev->of_node;
+
+ /*
+ * Set the impl for model-specific implementation quirks first,
+ * such that platform integration quirks can pick it up and
+ * inherit from it if necessary.
+ */
+ switch (smmu->model) {
+ case ARM_MMU500:
+ smmu->impl = &arm_mmu500_impl;
+ break;
+ case CAVIUM_SMMUV2:
+ return cavium_smmu_impl_init(smmu);
+ default:
+ break;
+ }
+
+ /* This is implicitly MMU-400 */
+ if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
+ smmu->impl = &calxeda_impl;
+
+ if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+ of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
+ of_device_is_compatible(np, "nvidia,tegra186-smmu"))
+ return nvidia_smmu_impl_init(smmu);
+
+ if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
+ smmu = qcom_smmu_impl_init(smmu);
+
+ if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
+ smmu->impl = &mrvl_mmu500_impl;
+
+ return smmu;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
new file mode 100644
index 0000000000..87bf522b9d
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/mc.h>
+
+#include "arm-smmu.h"
+
+/*
+ * Tegra194 has three ARM MMU-500 instances.
+ * Two of them are used together, must be programmed identically to
+ * interleave IOVA accesses across them, and translate accesses from
+ * non-isochronous HW devices.
+ * The third one translates accesses from isochronous HW devices.
+ *
+ * In addition, the SMMU driver needs to coordinate with the memory controller
+ * driver to ensure that the right SID override is programmed for any given
+ * memory client. This is necessary to allow for use-cases such as seamlessly
+ * handing over the display controller configuration from the firmware to the
+ * kernel.
+ *
+ * This implementation supports programming of the two instances that must
+ * be programmed identically and takes care of invoking the memory controller
+ * driver for SID override programming after devices have been attached to an
+ * SMMU instance.
+ */
+#define MAX_SMMU_INSTANCES 2
+
+struct nvidia_smmu {
+ struct arm_smmu_device smmu;
+ void __iomem *bases[MAX_SMMU_INSTANCES];
+ unsigned int num_instances;
+ struct tegra_mc *mc;
+};
+
+static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
+{
+ return container_of(smmu, struct nvidia_smmu, smmu);
+}
+
+static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
+ unsigned int inst, int page)
+{
+ struct nvidia_smmu *nvidia_smmu;
+
+ nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
+ return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
+}
+
+static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
+ int page, int offset)
+{
+ void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+ return readl_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
+ int page, int offset, u32 val)
+{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+ unsigned int i;
+
+ for (i = 0; i < nvidia->num_instances; i++) {
+ void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+ writel_relaxed(val, reg);
+ }
+}
+
+static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
+ int page, int offset)
+{
+ void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+ return readq_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
+ int page, int offset, u64 val)
+{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+ unsigned int i;
+
+ for (i = 0; i < nvidia->num_instances; i++) {
+ void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+ writeq_relaxed(val, reg);
+ }
+}
+
+static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+ unsigned int delay;
+
+ arm_smmu_writel(smmu, page, sync, 0);
+
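+	/* Poll until no instance reports an active TLB operation */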
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ unsigned int spin_cnt;
+
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ u32 val = 0;
+ unsigned int i;
+
+ for (i = 0; i < nvidia->num_instances; i++) {
+ void __iomem *reg;
+
+ reg = nvidia_smmu_page(smmu, i, page) + status;
+ val |= readl_relaxed(reg);
+ }
+
+ if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+
+ cpu_relax();
+ }
+
+ udelay(delay);
+ }
+
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU may be deadlocked\n");
+}
+
+static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
+{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+ unsigned int i;
+
+ for (i = 0; i < nvidia->num_instances; i++) {
+ u32 val;
+ void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
+ ARM_SMMU_GR0_sGFSR;
+
+ /* clear global FSR */
+ val = readl_relaxed(reg);
+ writel_relaxed(val, reg);
+ }
+
+ return 0;
+}
+
+static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
+ struct arm_smmu_device *smmu,
+ int inst)
+{
+ u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+ void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);
+
+ gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+ if (!gfsr)
+ return IRQ_NONE;
+
+ gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+ gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+ gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+ dev_err_ratelimited(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err_ratelimited(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+ writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
+{
+ unsigned int inst;
+ irqreturn_t ret = IRQ_NONE;
+ struct arm_smmu_device *smmu = dev;
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+
+ for (inst = 0; inst < nvidia->num_instances; inst++) {
+ irqreturn_t irq_ret;
+
+ irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
+ if (irq_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
+ struct arm_smmu_device *smmu,
+ int idx, int inst)
+{
+ u32 fsr, fsynr, cbfrsynra;
+ unsigned long iova;
+ void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
+ void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
+
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+ iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+ cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
+{
+ int idx;
+ unsigned int inst;
+ irqreturn_t ret = IRQ_NONE;
+ struct arm_smmu_device *smmu;
+ struct iommu_domain *domain = dev;
+ struct arm_smmu_domain *smmu_domain;
+ struct nvidia_smmu *nvidia;
+
+ smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
+ smmu = smmu_domain->smmu;
+ nvidia = to_nvidia_smmu(smmu);
+
+ for (inst = 0; inst < nvidia->num_instances; inst++) {
+ irqreturn_t irq_ret;
+
+ /*
+ * Interrupt line is shared between all contexts.
+ * Check for faults across all contexts.
+ */
+ for (idx = 0; idx < smmu->num_context_banks; idx++) {
+ irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
+ idx, inst);
+ if (irq_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
+{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+ int err;
+
+ err = tegra_mc_probe_device(nvidia->mc, dev);
+ if (err < 0)
+ dev_err(smmu->dev, "memory controller probe failed for %s: %d\n",
+ dev_name(dev), err);
+}
+
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg,
+ struct device *dev)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ const struct device_node *np = smmu->dev->of_node;
+
+ /*
+	 * Tegra194 and Tegra234 SoCs have an erratum that causes walk cache
+	 * entries to not be invalidated correctly: the walk cache index
+	 * generated for an IOVA is not the same across translation and
+	 * invalidation requests. This leads to page faults when a PMD entry
+	 * is released during unmap and then repopulated with a new PTE table
+	 * on a subsequent map request. Disabling large page mappings avoids
+	 * releasing PMD entries, so translations never see a stale PMD entry
+	 * in the walk cache.
+ * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+ * Tegra234.
+ */
+ if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+ of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+ smmu->pgsize_bitmap = PAGE_SIZE;
+ pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+ }
+
+ return 0;
+}
+
+static const struct arm_smmu_impl nvidia_smmu_impl = {
+ .read_reg = nvidia_smmu_read_reg,
+ .write_reg = nvidia_smmu_write_reg,
+ .read_reg64 = nvidia_smmu_read_reg64,
+ .write_reg64 = nvidia_smmu_write_reg64,
+ .reset = nvidia_smmu_reset,
+ .tlb_sync = nvidia_smmu_tlb_sync,
+ .global_fault = nvidia_smmu_global_fault,
+ .context_fault = nvidia_smmu_context_fault,
+ .probe_finalize = nvidia_smmu_probe_finalize,
+ .init_context = nvidia_smmu_init_context,
+};
+
+static const struct arm_smmu_impl nvidia_smmu_single_impl = {
+ .probe_finalize = nvidia_smmu_probe_finalize,
+ .init_context = nvidia_smmu_init_context,
+};
+
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct resource *res;
+ struct device *dev = smmu->dev;
+ struct nvidia_smmu *nvidia_smmu;
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned int i;
+
+ nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
+ if (!nvidia_smmu)
+ return ERR_PTR(-ENOMEM);
+
+ nvidia_smmu->mc = devm_tegra_memory_controller_get(dev);
+ if (IS_ERR(nvidia_smmu->mc))
+ return ERR_CAST(nvidia_smmu->mc);
+
+ /* Instance 0 is ioremapped by arm-smmu.c. */
+ nvidia_smmu->bases[0] = smmu->base;
+ nvidia_smmu->num_instances++;
+
+ for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nvidia_smmu->bases[i]))
+ return ERR_CAST(nvidia_smmu->bases[i]);
+
+ nvidia_smmu->num_instances++;
+ }
+
+ if (nvidia_smmu->num_instances == 1)
+ nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
+ else
+ nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+
+ return &nvidia_smmu->smmu;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
new file mode 100644
index 0000000000..bb89d49adf
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/ratelimit.h>
+
+#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
+
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
+{
+ int ret;
+ u32 tbu_pwr_status, sync_inv_ack, sync_inv_progress;
+ struct qcom_smmu *qsmmu = container_of(smmu, struct qcom_smmu, smmu);
+ const struct qcom_smmu_config *cfg;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (__ratelimit(&rs)) {
+ dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
+
+ cfg = qsmmu->cfg;
+ if (!cfg)
+ return;
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS],
+ &tbu_pwr_status);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU power status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK],
+ &sync_inv_ack);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU sync/inv ack status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR],
+ &sync_inv_progress);
+ if (ret)
+ dev_err(smmu->dev,
+				"Failed to read TCU sync/inv progress: %d\n", ret);
+
+ dev_err(smmu->dev,
+ "TBU: power_status %#x sync_inv_ack %#x sync_inv_progress %#x\n",
+ tbu_pwr_status, sync_inv_ack, sync_inv_progress);
+ }
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
new file mode 100644
index 0000000000..40503376d8
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/adreno-smmu-priv.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+
+#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
+
+#define QCOM_DUMMY_VAL -1
+
+static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
+{
+ return container_of(smmu, struct qcom_smmu, smmu);
+}
+
+static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int spin_cnt, delay;
+ u32 reg;
+
+ arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ reg = arm_smmu_readl(smmu, page, status);
+ if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
+ }
+ udelay(delay);
+ }
+
+ qcom_smmu_tlb_sync_debug(smmu);
+}
+
+static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
+ u32 reg)
+{
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+
+ /*
+ * On the GPU device we want to process subsequent transactions after a
+	 * fault to keep the GPU from hanging.
+ */
+ reg |= ARM_SMMU_SCTLR_HUPCF;
+
+ if (qsmmu->stall_enabled & BIT(idx))
+ reg |= ARM_SMMU_SCTLR_CFCFG;
+
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+}
+
+static void qcom_adreno_smmu_get_fault_info(const void *cookie,
+ struct adreno_smmu_fault_info *info)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
+ info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
+ info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
+ info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
+ info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
+ info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
+ info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
+}
+
+static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
+
+ if (enabled)
+ qsmmu->stall_enabled |= BIT(cfg->cbndx);
+ else
+ qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
+}
+
+static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ u32 reg = 0;
+
+ if (terminate)
+ reg |= ARM_SMMU_RESUME_TERMINATE;
+
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
+}
+
+#define QCOM_ADRENO_SMMU_GPU_SID 0
+
+static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ int i;
+
+ /*
+ * The GPU will always use SID 0 so that is a handy way to uniquely
+ * identify it and configure it for per-instance pagetables
+ */
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+
+ if (sid == QCOM_ADRENO_SMMU_GPU_SID)
+ return true;
+ }
+
+ return false;
+}
+
+static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
+ const void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct io_pgtable *pgtable =
+ io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+ return &pgtable->cfg;
+}
+
+/*
+ * Local implementation to configure TTBR0 with the specified pagetable config.
+ * The GPU driver will call this to enable TTBR0 when per-instance pagetables
+ * are active
+ */
+
+static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
+ const struct io_pgtable_cfg *pgtbl_cfg)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
+
+ /* The domain must have split pagetables already enabled */
+ if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
+ return -EINVAL;
+
+ /* If the pagetable config is NULL, disable TTBR0 */
+ if (!pgtbl_cfg) {
+ /* Do nothing if it is already disabled */
+ if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+ return -EINVAL;
+
+ /* Set TCR to the original configuration */
+ cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
+ cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+ } else {
+ u32 tcr = cb->tcr[0];
+
+ /* Don't call this again if TTBR0 is already enabled */
+ if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+ return -EINVAL;
+
+ tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
+ tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);
+
+ cb->tcr[0] = tcr;
+ cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+ }
+
+ arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);
+
+ return 0;
+}
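+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): the GPU
+ * driver enables a per-instance pagetable by handing in its own
+ * io_pgtable_cfg through the private interface, and disables it again
+ * by passing NULL, which restores the TCR/TTBR values derived from the
+ * original TTBR1 table.
+ */
+#if 0 /* illustrative only */
+static int example_switch_pagetable(struct adreno_smmu_priv *priv,
+                                    const struct io_pgtable_cfg *cfg)
+{
+ /* cfg == NULL tears TTBR0 back down */
+ return priv->set_ttbr0_cfg(priv->cookie, cfg);
+}
+#endif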
+
+static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev, int start)
+{
+ int count;
+
+ /*
+ * Assign context bank 0 to the GPU device so the GPU hardware can
+ * switch pagetables
+ */
+ if (qcom_adreno_smmu_is_gpu_device(dev)) {
+ start = 0;
+ count = 1;
+ } else {
+ start = 1;
+ count = smmu->num_context_banks;
+ }
+
+ return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
+}
+
+static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
+{
+ const struct device_node *np = smmu->dev->of_node;
+
+ if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
+ return false;
+
+ return true;
+}
+
+static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+{
+ struct adreno_smmu_priv *priv;
+
+ smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+
+ /* Only enable split pagetables for the GPU device (SID 0) */
+ if (!qcom_adreno_smmu_is_gpu_device(dev))
+ return 0;
+
+ /*
+ * All targets that use the qcom,adreno-smmu compatible string *should*
+ * be AARCH64 stage 1 but double check because the arm-smmu code assumes
+ * that is the case when the TTBR1 quirk is enabled
+ */
+ if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
+ (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
+ (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
+ pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
+
+ /*
+ * Initialize private interface with GPU:
+ */
+
+ priv = dev_get_drvdata(dev);
+ priv->cookie = smmu_domain;
+ priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
+ priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
+ priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
+ priv->set_stall = qcom_adreno_smmu_set_stall;
+ priv->resume_translation = qcom_adreno_smmu_resume_translation;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
+ { .compatible = "qcom,adreno" },
+ { .compatible = "qcom,adreno-gmu" },
+ { .compatible = "qcom,mdp4" },
+ { .compatible = "qcom,mdss" },
+ { .compatible = "qcom,sc7180-mdss" },
+ { .compatible = "qcom,sc7180-mss-pil" },
+ { .compatible = "qcom,sc7280-mdss" },
+ { .compatible = "qcom,sc7280-mss-pil" },
+ { .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sc8280xp-mdss" },
+ { .compatible = "qcom,sdm845-mdss" },
+ { .compatible = "qcom,sdm845-mss-pil" },
+ { .compatible = "qcom,sm6350-mdss" },
+ { .compatible = "qcom,sm6375-mdss" },
+ { .compatible = "qcom,sm8150-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
+ { }
+};
+
+static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+{
+ smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+
+ return 0;
+}
+
+static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+{
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ unsigned int last_s2cr;
+ u32 reg;
+ u32 smr;
+ int i;
+
+ /*
+ * Some platforms support more than the Arm SMMU architected maximum of
+ * 128 stream matching groups. For unknown reasons, the additional
+ * groups don't exhibit the same behavior as the architected registers,
+ * so limit the groups to 128 until the behavior is fixed for the other
+ * groups.
+ */
+ if (smmu->num_mapping_groups > 128) {
+ dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
+ smmu->num_mapping_groups = 128;
+ }
+
+ last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+
+ /*
+ * With some firmware versions writes to S2CR of type FAULT are
+ * ignored, and writing BYPASS will end up written as FAULT in the
+ * register. Perform a write to S2CR to detect if this is the case and
+ * if so reserve a context bank to emulate bypass streams.
+ */
+ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
+ arm_smmu_gr0_write(smmu, last_s2cr, reg);
+ reg = arm_smmu_gr0_read(smmu, last_s2cr);
+ if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
+ qsmmu->bypass_quirk = true;
+ qsmmu->bypass_cbndx = smmu->num_context_banks - 1;
+
+ set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+
+ arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
+ reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
+ arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
+ }
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+
+ if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+ /* Ignore valid bit for SMR mask extraction. */
+ smr &= ~ARM_SMMU_SMR_VALID;
+ smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+ smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+ smmu->smrs[i].valid = true;
+
+ smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
+ smmu->s2crs[i].cbndx = 0xff;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+{
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 cbndx = s2cr->cbndx;
+ u32 type = s2cr->type;
+ u32 reg;
+
+ if (qsmmu->bypass_quirk) {
+ if (type == S2CR_TYPE_BYPASS) {
+ /*
+ * Firmware with quirky S2CR handling will substitute
+ * BYPASS writes with FAULT, so point the stream to the
+ * reserved context bank and ask for translation on the
+ * stream
+ */
+ type = S2CR_TYPE_TRANS;
+ cbndx = qsmmu->bypass_cbndx;
+ } else if (type == S2CR_TYPE_FAULT) {
+ /*
+ * Firmware with quirky S2CR handling will ignore FAULT
+ * writes, so trick it into writing FAULT by asking
+ * for a BYPASS.
+ */
+ type = S2CR_TYPE_BYPASS;
+ cbndx = 0xff;
+ }
+ }
+
+ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
+}
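+
+/*
+ * In short, on quirky firmware the requested and programmed S2CR types
+ * pair up as follows (the firmware itself then turns the programmed
+ * BYPASS back into FAULT):
+ *
+ *	requested	programmed
+ *	BYPASS		TRANS, pointed at the reserved context bank
+ *	FAULT		BYPASS, cbndx 0xff
+ *	TRANS		TRANS (unchanged)
+ */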
+
+static int qcom_smmu_def_domain_type(struct device *dev)
+{
+ const struct of_device_id *match =
+ of_match_device(qcom_smmu_client_of_match, dev);
+
+ return match ? IOMMU_DOMAIN_IDENTITY : 0;
+}
+
+static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ arm_mmu500_reset(smmu);
+
+ /*
+ * To address performance degradation in non-real time clients,
+ * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
+ * such as MTP and db845, whose firmwares implement secure monitor
+ * call handlers to turn on/off the wait-for-safe logic.
+ */
+ ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
+ if (ret)
+ dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");
+
+ return ret;
+}
+
+static const struct arm_smmu_impl qcom_smmu_v2_impl = {
+ .init_context = qcom_smmu_init_context,
+ .cfg_probe = qcom_smmu_cfg_probe,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
+
+static const struct arm_smmu_impl qcom_smmu_500_impl = {
+ .init_context = qcom_smmu_init_context,
+ .cfg_probe = qcom_smmu_cfg_probe,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .reset = arm_mmu500_reset,
+ .write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
+
+static const struct arm_smmu_impl sdm845_smmu_500_impl = {
+ .init_context = qcom_smmu_init_context,
+ .cfg_probe = qcom_smmu_cfg_probe,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .reset = qcom_sdm845_smmu500_reset,
+ .write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
+
+static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
+ .init_context = qcom_adreno_smmu_init_context,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+ .write_sctlr = qcom_adreno_smmu_write_sctlr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
+
+static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
+ .init_context = qcom_adreno_smmu_init_context,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .reset = arm_mmu500_reset,
+ .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+ .write_sctlr = qcom_adreno_smmu_write_sctlr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
+
+static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
+ const struct qcom_smmu_match_data *data)
+{
+ const struct device_node *np = smmu->dev->of_node;
+ const struct arm_smmu_impl *impl;
+ struct qcom_smmu *qsmmu;
+
+ if (!data)
+ return ERR_PTR(-EINVAL);
+
+ if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
+ impl = data->adreno_impl;
+ else
+ impl = data->impl;
+
+ if (!impl)
+ return smmu;
+
+ /* Check to make sure qcom_scm has finished probing */
+ if (!qcom_scm_is_available())
+ return ERR_PTR(-EPROBE_DEFER);
+
+ qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
+ if (!qsmmu)
+ return ERR_PTR(-ENOMEM);
+
+ qsmmu->smmu.impl = impl;
+ qsmmu->cfg = data->cfg;
+
+ return &qsmmu->smmu;
+}
+
+/* Implementation Defined Register Space 0 register offsets */
+static const u32 qcom_smmu_impl0_reg_offset[] = {
+ [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
+ [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
+ [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
+};
+
+static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+/*
+ * It is not yet possible to use the MDP SMMU with the bypass quirk on
+ * msm8996, as there are not enough context banks.
+ */
+static const struct qcom_smmu_match_data msm8996_smmu_data = {
+ .impl = NULL,
+ .adreno_impl = &qcom_adreno_smmu_v2_impl,
+};
+
+static const struct qcom_smmu_match_data qcom_smmu_v2_data = {
+ .impl = &qcom_smmu_v2_impl,
+ .adreno_impl = &qcom_adreno_smmu_v2_impl,
+};
+
+static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
+ .impl = &sdm845_smmu_500_impl,
+ /*
+ * No need for adreno impl here. On sdm845 the Adreno SMMU is handled
+ * by the separate sdm845-smmu-v2 device.
+ */
+ /* Also no debug configuration. */
+};
+
+static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
+ .impl = &qcom_smmu_500_impl,
+ .adreno_impl = &qcom_adreno_smmu_500_impl,
+ .cfg = &qcom_smmu_impl0_cfg,
+};
+
+/*
+ * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need
+ * special handling and cannot be covered by the qcom,smmu-500 entry.
+ */
+static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+ { .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
+ { .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
+ { .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data},
+ { .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { }
+};
+
+#ifdef CONFIG_ACPI
+static struct acpi_platform_list qcom_acpi_platlist[] = {
+ { "LENOVO", "CB-01 ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
+ { "QCOM ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
+ { }
+};
+#endif
+
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ const struct device_node *np = smmu->dev->of_node;
+ const struct of_device_id *match;
+
+#ifdef CONFIG_ACPI
+ if (np == NULL) {
+ /* Match platform for ACPI boot */
+ if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
+ return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data);
+ }
+#endif
+
+ match = of_match_node(qcom_smmu_impl_of_match, np);
+ if (match)
+ return qcom_smmu_create(smmu, match->data);
+
+ /*
+ * If you hit this WARN_ON() you are missing an entry in the
+ * qcom_smmu_impl_of_match[] table, and GPU per-process page-
+ * tables will be broken.
+ */
+ WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
+ "Missing qcom_smmu_impl_of_match entry for: %s",
+ dev_name(smmu->dev));
+
+ return smmu;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
new file mode 100644
index 0000000000..593910567b
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ARM_SMMU_QCOM_H
+#define _ARM_SMMU_QCOM_H
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+ const struct qcom_smmu_config *cfg;
+ bool bypass_quirk;
+ u8 bypass_cbndx;
+ u32 stall_enabled;
+};
+
+enum qcom_smmu_impl_reg_offset {
+ QCOM_SMMU_TBU_PWR_STATUS,
+ QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
+ QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
+};
+
+struct qcom_smmu_config {
+ const u32 *reg_offset;
+};
+
+struct qcom_smmu_match_data {
+ const struct qcom_smmu_config *cfg;
+ const struct arm_smmu_impl *impl;
+ const struct arm_smmu_impl *adreno_impl;
+};
+
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
+#else
+static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
+#endif
+
+#endif /* _ARM_SMMU_QCOM_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
new file mode 100644
index 0000000000..d6d1a2a55c
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -0,0 +1,2304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver currently supports:
+ * - SMMUv1 and v2 implementations
+ * - Stream-matching and stream-indexing
+ * - v7/v8 long-descriptor format
+ * - Non-secure access to the SMMU
+ * - Context fault reporting
+ * - Extended Stream ID (16 bit)
+ */
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+
+#include <linux/fsl/mc.h>
+
+#include "arm-smmu.h"
+#include "../../dma-iommu.h"
+
+/*
+ * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
+ * global register space are still, in fact, using a hypervisor to mediate it
+ * by trapping and emulating register accesses. Sadly, some deployed versions
+ * of said trapping code have bugs wherein they go horribly wrong for stores
+ * using r31 (i.e. XZR/WZR) as the source register.
+ */
+#define QCOM_DUMMY_VAL -1
+
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
+static int force_stage;
+module_param(force_stage, int, S_IRUGO);
+MODULE_PARM_DESC(force_stage,
+ "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+static bool disable_bypass =
+ IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
+module_param(disable_bypass, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_bypass,
+ "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
+
+#define s2cr_init_val (struct arm_smmu_s2cr){ \
+ .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
+}
+
+static bool using_legacy_binding, using_generic_binding;
+
+static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
+{
+ if (pm_runtime_enabled(smmu->dev))
+ return pm_runtime_resume_and_get(smmu->dev);
+
+ return 0;
+}
+
+static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
+{
+ if (pm_runtime_enabled(smmu->dev))
+ pm_runtime_put_autosuspend(smmu->dev);
+}
+
+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct arm_smmu_domain, domain);
+}
+
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
+static struct device_node *dev_get_dev_node(struct device *dev)
+{
+ if (dev_is_pci(dev)) {
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+
+ while (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ return of_node_get(bus->bridge->parent->of_node);
+ }
+
+ return of_node_get(dev->of_node);
+}
+
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
+{
+ *((__be32 *)data) = cpu_to_be32(alias);
+ return 0; /* Continue walking */
+}
+
+static int __find_legacy_master_phandle(struct device *dev, void *data)
+{
+ struct of_phandle_iterator *it = *(void **)data;
+ struct device_node *np = it->node;
+ int err;
+
+ of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
+ "#stream-id-cells", -1)
+ if (it->node == np) {
+ *(void **)data = dev;
+ return 1;
+ }
+ it->node = np;
+ return err == -ENOENT ? 0 : err;
+}
+
+static int arm_smmu_register_legacy_master(struct device *dev,
+ struct arm_smmu_device **smmu)
+{
+ struct device *smmu_dev;
+ struct device_node *np;
+ struct of_phandle_iterator it;
+ void *data = &it;
+ u32 *sids;
+ __be32 pci_sid;
+ int err;
+
+ np = dev_get_dev_node(dev);
+ if (!np || !of_property_present(np, "#stream-id-cells")) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ it.node = np;
+ err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
+ __find_legacy_master_phandle);
+ smmu_dev = data;
+ of_node_put(np);
+ if (err == 0)
+ return -ENODEV;
+ if (err < 0)
+ return err;
+
+ if (dev_is_pci(dev)) {
+ /* "mmu-masters" assumes Stream ID == Requester ID */
+ pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
+ &pci_sid);
+ it.cur = &pci_sid;
+ it.cur_count = 1;
+ }
+
+ err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
+ &arm_smmu_ops);
+ if (err)
+ return err;
+
+ sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
+ if (!sids)
+ return -ENOMEM;
+
+ *smmu = dev_get_drvdata(smmu_dev);
+ of_phandle_iterator_args(&it, sids, it.cur_count);
+ err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
+ kfree(sids);
+ return err;
+}
+#else
+static int arm_smmu_register_legacy_master(struct device *dev,
+ struct arm_smmu_device **smmu)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
+
+static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
+{
+ clear_bit(idx, map);
+}
+
+/* Wait for any pending TLB invalidations to complete */
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int spin_cnt, delay;
+ u32 reg;
+
+ if (smmu->impl && unlikely(smmu->impl->tlb_sync))
+ return smmu->impl->tlb_sync(smmu, page, sync, status);
+
+ arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ reg = arm_smmu_readl(smmu, page, status);
+ if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
+ }
+ udelay(delay);
+ }
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU may be deadlocked\n");
+}
+
+static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&smmu->global_sync_lock, flags);
+ __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
+ ARM_SMMU_GR0_sTLBGSTATUS);
+ spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
+}
+
+static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
+ ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+}
+
+static void arm_smmu_tlb_inv_context_s1(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ /*
+ * The TLBI write may be relaxed, so ensure that PTEs cleared by the
+ * current CPU are visible beforehand.
+ */
+ wmb();
+ arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
+ ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
+ arm_smmu_tlb_sync_context(smmu_domain);
+}
+
+static void arm_smmu_tlb_inv_context_s2(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ /* See above */
+ wmb();
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+ arm_smmu_tlb_sync_global(smmu);
+}
+
+static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie, int reg)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ int idx = cfg->cbndx;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
+ if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
+ iova = (iova >> 12) << 12;
+ iova |= cfg->asid;
+ do {
+ arm_smmu_cb_write(smmu, idx, reg, iova);
+ iova += granule;
+ } while (size -= granule);
+ } else {
+ iova >>= 12;
+ iova |= (u64)cfg->asid << 48;
+ do {
+ arm_smmu_cb_writeq(smmu, idx, reg, iova);
+ iova += granule >> 12;
+ } while (size -= granule);
+ }
+}
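+
+/*
+ * Worked example of the AArch64 operand encoding above (hypothetical
+ * values): invalidating iova 0x7fff1000 in a context with asid 5 uses
+ *
+ *	(0x7fff1000 >> 12) | ((u64)5 << 48) == 0x000500000007fff1
+ *
+ * i.e. the VA page number in the low bits and the ASID in bits [63:48];
+ * the AArch32 formats instead carry the ASID in the low bits of the
+ * page-aligned address.
+ */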
+
+static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie, int reg)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int idx = smmu_domain->cfg.cbndx;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
+ iova >>= 12;
+ do {
+ if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ arm_smmu_cb_writeq(smmu, idx, reg, iova);
+ else
+ arm_smmu_cb_write(smmu, idx, reg, iova);
+ iova += granule >> 12;
+ } while (size -= granule);
+}
+
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+
+ if (cfg->flush_walk_prefer_tlbiasid) {
+ arm_smmu_tlb_inv_context_s1(cookie);
+ } else {
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVA);
+ arm_smmu_tlb_sync_context(cookie);
+ }
+}
+
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+}
+
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2);
+ arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
+}
+
+static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_context_s2(cookie);
+}
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+}
+
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s1,
+};
+
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2,
+};
+
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
+};
+
+static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
+{
+ u32 fsr, fsynr, cbfrsynra;
+ unsigned long iova;
+ struct iommu_domain *domain = dev;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int idx = smmu_domain->cfg.cbndx;
+ int ret;
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
+ iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
+ cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+ ret = report_iommu_fault(domain, NULL, iova,
+ fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
+
+ if (ret == -ENOSYS)
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
+{
+ u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+ struct arm_smmu_device *smmu = dev;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+ gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
+ gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
+ gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
+
+ if (!gfsr)
+ return IRQ_NONE;
+
+ if (__ratelimit(&rs)) {
+ if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
+ (gfsr & ARM_SMMU_sGFSR_USF))
+ dev_err(smmu->dev,
+ "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
+ (u16)gfsynr1);
+ else
+ dev_err(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+ }
+
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
+ return IRQ_HANDLED;
+}
+
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg)
+{
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+
+ cb->cfg = cfg;
+
+ /* TCR */
+ if (stage1) {
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
+ } else {
+ cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
+ cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ cb->tcr[1] |= ARM_SMMU_TCR2_AS;
+ else
+ cb->tcr[0] |= ARM_SMMU_TCR_EAE;
+ }
+ } else {
+ cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
+ }
+
+ /* TTBRs */
+ if (stage1) {
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
+ cb->ttbr[1] = 0;
+ } else {
+ cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+ cfg->asid);
+ cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+ cfg->asid);
+
+ if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+ cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ else
+ cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ }
+ } else {
+ cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ }
+
+ /* MAIRs (stage-1 only) */
+ if (stage1) {
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
+ cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
+ } else {
+ cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+ cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
+ }
+ }
+}
+
+void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
+{
+ u32 reg;
+ bool stage1;
+ struct arm_smmu_cb *cb = &smmu->cbs[idx];
+ struct arm_smmu_cfg *cfg = cb->cfg;
+
+ /* Unassigned context banks only need disabling */
+ if (!cfg) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
+ return;
+ }
+
+ stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+
+ /* CBA2R */
+ if (smmu->version > ARM_SMMU_V1) {
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ reg = ARM_SMMU_CBA2R_VA64;
+ else
+ reg = 0;
+ /* 16-bit VMIDs live in CBA2R */
+ if (smmu->features & ARM_SMMU_FEAT_VMID16)
+ reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
+
+ arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
+ }
+
+ /* CBAR */
+ reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
+ if (smmu->version < ARM_SMMU_V2)
+ reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
+
+ /*
+ * Use the weakest shareability/memory types, so they are
+ * overridden by the ttbcr/pte.
+ */
+ if (stage1) {
+ reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
+ ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
+ FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
+ ARM_SMMU_CBAR_S1_MEMATTR_WB);
+ } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
+ /* 8-bit VMIDs live in CBAR */
+ reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
+ }
+ arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
+
+ /*
+ * TCR
+ * We must write this before the TTBRs, since it determines the
+ * access behaviour of some fields (in particular, ASID[15:8]).
+ */
+ if (stage1 && smmu->version > ARM_SMMU_V1)
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
+
+ /* TTBRs */
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
+ } else {
+ arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
+ if (stage1)
+ arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
+ cb->ttbr[1]);
+ }
+
+ /* MAIRs (stage-1 only) */
+ if (stage1) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
+ }
+
+ /* SCTLR */
+ reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
+ ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
+ if (stage1)
+ reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ reg |= ARM_SMMU_SCTLR_E;
+
+ if (smmu->impl && smmu->impl->write_sctlr)
+ smmu->impl->write_sctlr(smmu, idx, reg);
+ else
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+}
+
+static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev, unsigned int start)
+{
+ if (smmu->impl && smmu->impl->alloc_context_bank)
+ return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
+
+ return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
+}
+
+static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev)
+{
+ int irq, start, ret = 0;
+ unsigned long ias, oas;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct io_pgtable_cfg pgtbl_cfg;
+ enum io_pgtable_fmt fmt;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ irqreturn_t (*context_fault)(int irq, void *dev);
+
+ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ goto out_unlock;
+
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
+ /*
+ * Mapping the requested stage onto what we support is surprisingly
+ * complicated, mainly because the spec allows S1+S2 SMMUs without
+ * support for nested translation. That means we end up with the
+ * following table:
+ *
+ * Requested Supported Actual
+ * S1 N S1
+ * S1 S1+S2 S1
+ * S1 S2 S2
+ * S1 S1 S1
+ * N N N
+ * N S1+S2 S2
+ * N S2 S2
+ * N S1 S1
+ *
+ * Note that you can't actually request stage-2 mappings.
+ */
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ /*
+ * Choosing a suitable context format is even more fiddly. Until we
+ * grow some way for the caller to express a preference, and/or move
+ * the decision into the io-pgtable code where it arguably belongs,
+ * just aim for the closest thing to the rest of the system, and hope
+ * that the hardware isn't esoteric enough that we can't assume AArch64
+ * support to be a superset of AArch32 support...
+ */
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
+ cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+ if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
+ !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
+ (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
+ (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
+ cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
+ if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
+ (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
+ ARM_SMMU_FEAT_FMT_AARCH64_16K |
+ ARM_SMMU_FEAT_FMT_AARCH64_4K)))
+ cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
+
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+ start = smmu->num_s2_context_banks;
+ ias = smmu->va_size;
+ oas = smmu->ipa_size;
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
+ fmt = ARM_64_LPAE_S1;
+ } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
+ fmt = ARM_32_LPAE_S1;
+ ias = min(ias, 32UL);
+ oas = min(oas, 40UL);
+ } else {
+ fmt = ARM_V7S;
+ ias = min(ias, 32UL);
+ oas = min(oas, 32UL);
+ }
+ smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
+ break;
+ case ARM_SMMU_DOMAIN_NESTED:
+ /*
+ * We will likely want to change this if/when KVM gets
+ * involved.
+ */
+ case ARM_SMMU_DOMAIN_S2:
+ cfg->cbar = CBAR_TYPE_S2_TRANS;
+ start = 0;
+ ias = smmu->ipa_size;
+ oas = smmu->pa_size;
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
+ fmt = ARM_64_LPAE_S2;
+ } else {
+ fmt = ARM_32_LPAE_S2;
+ ias = min(ias, 40UL);
+ oas = min(oas, 40UL);
+ }
+ if (smmu->version == ARM_SMMU_V2)
+ smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
+ else
+ smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
+ if (ret < 0)
+ goto out_unlock;
+
+ smmu_domain->smmu = smmu;
+
+ cfg->cbndx = ret;
+ if (smmu->version < ARM_SMMU_V2) {
+ cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+ cfg->irptndx %= smmu->num_context_irqs;
+ } else {
+ cfg->irptndx = cfg->cbndx;
+ }
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
+ cfg->vmid = cfg->cbndx + 1;
+ else
+ cfg->asid = cfg->cbndx;
+
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = smmu->pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
+ .tlb = smmu_domain->flush_ops,
+ .iommu_dev = smmu->dev,
+ };
+
+ if (smmu->impl && smmu->impl->init_context) {
+ ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
+ if (ret)
+ goto out_clear_smmu;
+ }
+
+ if (smmu_domain->pgtbl_quirks)
+ pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
+
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+ if (!pgtbl_ops) {
+ ret = -ENOMEM;
+ goto out_clear_smmu;
+ }
+
+ /* Update the domain's page sizes to reflect the page table format */
+ domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+
+ if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+ domain->geometry.aperture_start = ~0UL << ias;
+ domain->geometry.aperture_end = ~0UL;
+ } else {
+ domain->geometry.aperture_end = (1UL << ias) - 1;
+ }
+
+ domain->geometry.force_aperture = true;
+
+ /* Initialise the context bank with our page table cfg */
+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+ arm_smmu_write_context_bank(smmu, cfg->cbndx);
+
+ /*
+ * Request context fault interrupt. Do this last to avoid the
+ * handler seeing a half-initialised domain state.
+ */
+ irq = smmu->irqs[cfg->irptndx];
+
+ if (smmu->impl && smmu->impl->context_fault)
+ context_fault = smmu->impl->context_fault;
+ else
+ context_fault = arm_smmu_context_fault;
+
+ ret = devm_request_irq(smmu->dev, irq, context_fault,
+ IRQF_SHARED, "arm-smmu-context-fault", domain);
+ if (ret < 0) {
+ dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+ cfg->irptndx, irq);
+ cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
+ }
+
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ /* Publish page table ops for map/unmap */
+ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
+
+out_clear_smmu:
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+ smmu_domain->smmu = NULL;
+out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
+}
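+
+/*
+ * Worked example for the aperture computed above (assuming ias = 48):
+ * with the TTBR1 quirk the domain spans ~0UL << 48, i.e.
+ * 0xffff000000000000..0xffffffffffffffff, the "upper half" that TTBR1
+ * translates, whereas a conventional TTBR0 domain spans
+ * 0..(1UL << 48) - 1.
+ */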
+
+static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ int ret, irq;
+
+ if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+ return;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return;
+
+ /*
+ * Disable the context bank before freeing its page tables and
+ * releasing the bank itself.
+ */
+ smmu->cbs[cfg->cbndx].cfg = NULL;
+ arm_smmu_write_context_bank(smmu, cfg->cbndx);
+
+ if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
+ irq = smmu->irqs[cfg->irptndx];
+ devm_free_irq(smmu->dev, irq, domain);
+ }
+
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+
+ arm_smmu_rpm_put(smmu);
+}
+
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+{
+ struct arm_smmu_domain *smmu_domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
+ if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
+ return NULL;
+ }
+ /*
+ * Allocate the domain and initialise some of its data structures.
+ * We can't really do anything meaningful until we've added a
+ * master.
+ */
+ smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+ if (!smmu_domain)
+ return NULL;
+
+ mutex_init(&smmu_domain->init_mutex);
+ spin_lock_init(&smmu_domain->cb_lock);
+
+ return &smmu_domain->domain;
+}
+
+static void arm_smmu_domain_free(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ /*
+ * Free the domain resources. We assume that all devices have
+ * already been detached.
+ */
+ arm_smmu_destroy_domain_context(domain);
+ kfree(smmu_domain);
+}
+
+static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
+{
+ struct arm_smmu_smr *smr = smmu->smrs + idx;
+ u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
+ FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
+
+ if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
+ reg |= ARM_SMMU_SMR_VALID;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
+}
+
+static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+{
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+ u32 reg;
+
+ if (smmu->impl && smmu->impl->write_s2cr) {
+ smmu->impl->write_s2cr(smmu, idx);
+ return;
+ }
+
+ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+
+ if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
+ smmu->smrs[idx].valid)
+ reg |= ARM_SMMU_S2CR_EXIDVALID;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
+}
+
+static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
+{
+ arm_smmu_write_s2cr(smmu, idx);
+ if (smmu->smrs)
+ arm_smmu_write_smr(smmu, idx);
+}
+
+/*
+ * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
+ * should be called after sCR0 is written.
+ */
+static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
+{
+ u32 smr;
+ int i;
+
+ if (!smmu->smrs)
+ return;
+ /*
+ * If we've had to accommodate firmware memory regions, we may
+ * have live SMRs by now; tread carefully...
+ *
+ * Somewhat perversely, not having a free SMR for this test implies we
+ * can get away without it anyway, as we'll only be able to 'allocate'
+ * these SMRs for the ID/mask values we're already trusting to be OK.
+ */
+ for (i = 0; i < smmu->num_mapping_groups; i++)
+ if (!smmu->smrs[i].valid)
+ goto smr_ok;
+ return;
+smr_ok:
+ /*
+ * SMR.ID bits may not be preserved if the corresponding MASK
+ * bits are set, so check each one separately. We can reject
+ * masters later if they try to claim IDs outside these masks.
+ */
+ smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+ smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+
+ smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+ smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+}
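+
+/*
+ * Worked example of the probe above (hypothetical hardware): writing
+ * SMR.ID = 0x7fff and reading back 0x3fff would mean only 14 ID bits
+ * stick, so streamid_mask narrows to 0x3fff and any master claiming a
+ * stream ID above that is later rejected in arm_smmu_probe_device().
+ */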
+
+static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
+{
+ struct arm_smmu_smr *smrs = smmu->smrs;
+ int i, free_idx = -ENOSPC;
+
+ /* Stream indexing is blissfully easy */
+ if (!smrs)
+ return id;
+
+ /* Validating SMRs is... less so */
+ for (i = 0; i < smmu->num_mapping_groups; ++i) {
+ if (!smrs[i].valid) {
+ /*
+ * Note the first free entry we come across, which
+ * we'll claim in the end if nothing else matches.
+ */
+ if (free_idx < 0)
+ free_idx = i;
+ continue;
+ }
+ /*
+ * If the new entry is _entirely_ matched by an existing entry,
+ * then reuse that, with the guarantee that there also cannot
+ * be any subsequent conflicting entries. In normal use we'd
+ * expect simply identical entries for this case, but there's
+ * no harm in accommodating the generalisation.
+ */
+ if ((mask & smrs[i].mask) == mask &&
+ !((id ^ smrs[i].id) & ~smrs[i].mask))
+ return i;
+ /*
+ * If the new entry has any other overlap with an existing one,
+ * though, then there always exists at least one stream ID
+ * which would cause a conflict, and we can't allow that risk.
+ */
+ if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
+ return -EINVAL;
+ }
+
+ return free_idx;
+}
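+
+/*
+ * Worked example of the two bitwise tests above (hypothetical values):
+ * take an existing entry with id = 0x400, mask = 0x0ff, matching
+ * stream IDs 0x400..0x4ff.
+ *
+ * - New entry id = 0x410, mask = 0x00f: (0x00f & 0x0ff) == 0x00f and
+ *   (0x410 ^ 0x400) & ~0x0ff == 0, so it is entirely contained and the
+ *   existing index is reused.
+ *
+ * - New entry id = 0x4f0, mask = 0xf0f: not contained, but
+ *   (0x4f0 ^ 0x400) & ~(0x0ff | 0xf0f) == 0, so at least one stream ID
+ *   could match both entries and the allocation fails with -EINVAL.
+ */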
+
+static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
+{
+ if (--smmu->s2crs[idx].count)
+ return false;
+
+ smmu->s2crs[idx] = s2cr_init_val;
+ if (smmu->smrs)
+ smmu->smrs[idx].valid = false;
+
+ return true;
+}
+
+static int arm_smmu_master_alloc_smes(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = cfg->smmu;
+ struct arm_smmu_smr *smrs = smmu->smrs;
+ int i, idx, ret;
+
+ mutex_lock(&smmu->stream_map_mutex);
+ /* Figure out a viable stream map entry allocation */
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
+ u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+ u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
+
+ if (idx != INVALID_SMENDX) {
+ ret = -EEXIST;
+ goto out_err;
+ }
+
+ ret = arm_smmu_find_sme(smmu, sid, mask);
+ if (ret < 0)
+ goto out_err;
+
+ idx = ret;
+ if (smrs && smmu->s2crs[idx].count == 0) {
+ smrs[idx].id = sid;
+ smrs[idx].mask = mask;
+ smrs[idx].valid = true;
+ }
+ smmu->s2crs[idx].count++;
+ cfg->smendx[i] = (s16)idx;
+ }
+
+ /* It worked! Now, poke the actual hardware */
+ for_each_cfg_sme(cfg, fwspec, i, idx)
+ arm_smmu_write_sme(smmu, idx);
+
+ mutex_unlock(&smmu->stream_map_mutex);
+ return 0;
+
+out_err:
+ while (i--) {
+ arm_smmu_free_sme(smmu, cfg->smendx[i]);
+ cfg->smendx[i] = INVALID_SMENDX;
+ }
+ mutex_unlock(&smmu->stream_map_mutex);
+ return ret;
+}
+
+static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
+ struct iommu_fwspec *fwspec)
+{
+ struct arm_smmu_device *smmu = cfg->smmu;
+ int i, idx;
+
+ mutex_lock(&smmu->stream_map_mutex);
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
+ if (arm_smmu_free_sme(smmu, idx))
+ arm_smmu_write_sme(smmu, idx);
+ cfg->smendx[i] = INVALID_SMENDX;
+ }
+ mutex_unlock(&smmu->stream_map_mutex);
+}
+
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master_cfg *cfg,
+ struct iommu_fwspec *fwspec)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs;
+ u8 cbndx = smmu_domain->cfg.cbndx;
+ enum arm_smmu_s2cr_type type;
+ int i, idx;
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
+ type = S2CR_TYPE_BYPASS;
+ else
+ type = S2CR_TYPE_TRANS;
+
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
+ if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
+ continue;
+
+ s2cr[idx].type = type;
+ s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
+ s2cr[idx].cbndx = cbndx;
+ arm_smmu_write_s2cr(smmu, idx);
+ }
+ return 0;
+}
+
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_device *smmu;
+ int ret;
+
+ if (!fwspec || fwspec->ops != &arm_smmu_ops) {
+ dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
+ return -ENXIO;
+ }
+
+ /*
+ * FIXME: The arch/arm DMA API code tries to attach devices to its own
+ * domains between of_xlate() and probe_device() - we have no way to cope
+ * with that, so until ARM gets converted to rely on groups and default
+ * domains, just say no (but more politely than by dereferencing NULL).
+ * This should be at least a WARN_ON once that's sorted.
+ */
+ cfg = dev_iommu_priv_get(dev);
+ if (!cfg)
+ return -ENODEV;
+
+ smmu = cfg->smmu;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return ret;
+
+ /* Ensure that the domain is finalised */
+ ret = arm_smmu_init_domain_context(domain, smmu, dev);
+ if (ret < 0)
+ goto rpm_put;
+
+ /*
+ * Sanity check the domain. We don't support domains across
+ * different SMMUs.
+ */
+ if (smmu_domain->smmu != smmu) {
+ ret = -EINVAL;
+ goto rpm_put;
+ }
+
+ /* Looks ok, so add the device to the domain */
+ ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
+
+ /*
+ * Setup an autosuspend delay to avoid bouncing runpm state.
+ * Otherwise, if a driver for a suspended consumer device
+ * unmaps buffers, it will runpm resume/suspend for each one.
+ *
+ * For example, when used by a GPU device, when an application
+ * or game exits, it can trigger unmapping 100s or 1000s of
+ * buffers. With a runpm cycle for each buffer, that adds up
+ * to 5-10sec worth of reprogramming the context bank, while
+ * the system appears to be locked up to the user.
+ */
+ pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+ pm_runtime_use_autosuspend(smmu->dev);
+
+rpm_put:
+ arm_smmu_rpm_put(smmu);
+ return ret;
+}
+
+static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+ int ret;
+
+ if (!ops)
+ return -ENODEV;
+
+ arm_smmu_rpm_get(smmu);
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
+ arm_smmu_rpm_put(smmu);
+
+ return ret;
+}
+
+static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+ size_t ret;
+
+ if (!ops)
+ return 0;
+
+ arm_smmu_rpm_get(smmu);
+ ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
+ arm_smmu_rpm_put(smmu);
+
+ return ret;
+}
+
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (smmu_domain->flush_ops) {
+ arm_smmu_rpm_get(smmu);
+ smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
+ arm_smmu_rpm_put(smmu);
+ }
+}
+
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (!smmu)
+ return;
+
+ arm_smmu_rpm_get(smmu);
+ if (smmu->version == ARM_SMMU_V2 ||
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ arm_smmu_tlb_sync_context(smmu_domain);
+ else
+ arm_smmu_tlb_sync_global(smmu);
+ arm_smmu_rpm_put(smmu);
+}
+
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ struct device *dev = smmu->dev;
+ void __iomem *reg;
+ u32 tmp;
+ u64 phys;
+ unsigned long va, flags;
+ int ret, idx = cfg->cbndx;
+ phys_addr_t addr = 0;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return 0;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ va = iova & ~0xfffUL;
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
+ else
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
+
+ reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
+ if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
+ 5, 50)) {
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+ dev_err(dev,
+ "iova to phys timed out on %pad. Falling back to software table walk.\n",
+ &iova);
+ arm_smmu_rpm_put(smmu);
+ return ops->iova_to_phys(ops, iova);
+ }
+
+ phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+ if (phys & ARM_SMMU_CB_PAR_F) {
+ dev_err(dev, "translation fault!\n");
+ dev_err(dev, "PAR = 0x%llx\n", phys);
+ goto out;
+ }
+
+ addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+out:
+ arm_smmu_rpm_put(smmu);
+
+ return addr;
+}
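+
+/*
+ * Worked example of the PAR decode above (hypothetical values): for
+ * iova = 0x10002abc and a fault-free PAR of 0x1234567000, the result is
+ *
+ *	(0x1234567000 & GENMASK_ULL(39, 12)) | (0x10002abc & 0xfff)
+ *		== 0x1234567abc
+ *
+ * i.e. the page frame translated by the hardware plus the page offset
+ * of the original IOVA.
+ */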
+
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ return arm_smmu_iova_to_phys_hard(domain, iova);
+
+ return ops->iova_to_phys(ops, iova);
+}
+
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ /*
+ * It's overwhelmingly the case in practice that when the pagetable
+ * walk interface is connected to a coherent interconnect, all the
+ * translation interfaces are too. Furthermore if the device is
+ * natively coherent, then its translation interface must also be.
+ */
+ return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
+ device_get_dma_attr(dev) == DEV_DMA_COHERENT;
+ case IOMMU_CAP_NOEXEC:
+ case IOMMU_CAP_DEFERRED_FLUSH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
+ fwnode);
+ put_device(dev);
+ return dev ? dev_get_drvdata(dev) : NULL;
+}
+
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
+{
+ struct arm_smmu_device *smmu = NULL;
+ struct arm_smmu_master_cfg *cfg;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ int i, ret;
+
+ if (using_legacy_binding) {
+ ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+ /*
+ * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
+ * will allocate/initialise a new one. Thus we need to update fwspec for
+ * later use.
+ */
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (ret)
+ goto out_free;
+ } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
+ } else {
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = -EINVAL;
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+ u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
+
+ if (sid & ~smmu->streamid_mask) {
+ dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
+ sid, smmu->streamid_mask);
+ goto out_free;
+ }
+ if (mask & ~smmu->smr_mask_mask) {
+ dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
+ mask, smmu->smr_mask_mask);
+ goto out_free;
+ }
+ }
+
+ ret = -ENOMEM;
+ cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
+ GFP_KERNEL);
+ if (!cfg)
+ goto out_free;
+
+ cfg->smmu = smmu;
+ dev_iommu_priv_set(dev, cfg);
+ while (i--)
+ cfg->smendx[i] = INVALID_SMENDX;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ goto out_cfg_free;
+
+ ret = arm_smmu_master_alloc_smes(dev);
+ arm_smmu_rpm_put(smmu);
+
+ if (ret)
+ goto out_cfg_free;
+
+ device_link_add(dev, smmu->dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
+ return &smmu->iommu;
+
+out_cfg_free:
+ kfree(cfg);
+out_free:
+ iommu_fwspec_free(dev);
+ return ERR_PTR(ret);
+}
+
+static void arm_smmu_release_device(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = arm_smmu_rpm_get(cfg->smmu);
+ if (ret < 0)
+ return;
+
+ arm_smmu_master_free_smes(cfg, fwspec);
+
+ arm_smmu_rpm_put(cfg->smmu);
+
+ dev_iommu_priv_set(dev, NULL);
+ kfree(cfg);
+}
+
+static void arm_smmu_probe_finalize(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_device *smmu;
+
+ cfg = dev_iommu_priv_get(dev);
+ smmu = cfg->smmu;
+
+ if (smmu->impl && smmu->impl->probe_finalize)
+ smmu->impl->probe_finalize(smmu, dev);
+}
+
+static struct iommu_group *arm_smmu_device_group(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_device *smmu = cfg->smmu;
+ struct iommu_group *group = NULL;
+ int i, idx;
+
+ mutex_lock(&smmu->stream_map_mutex);
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
+ if (group && smmu->s2crs[idx].group &&
+ group != smmu->s2crs[idx].group) {
+ mutex_unlock(&smmu->stream_map_mutex);
+ return ERR_PTR(-EINVAL);
+ }
+
+ group = smmu->s2crs[idx].group;
+ }
+
+ if (group) {
+ mutex_unlock(&smmu->stream_map_mutex);
+ return iommu_group_ref_get(group);
+ }
+
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else if (dev_is_fsl_mc(dev))
+ group = fsl_mc_device_group(dev);
+ else
+ group = generic_device_group(dev);
+
+ /* Remember group for faster lookups */
+ if (!IS_ERR(group))
+ for_each_cfg_sme(cfg, fwspec, i, idx)
+ smmu->s2crs[idx].group = group;
+
+ mutex_unlock(&smmu->stream_map_mutex);
+ return group;
+}
+
+static int arm_smmu_enable_nesting(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret = 0;
+
+ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ return ret;
+}
+
+static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret = 0;
+
+ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->pgtbl_quirks = quirks;
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ return ret;
+}
+
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ u32 mask, fwid = 0;
+
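+	/* Pack the DT cells into the SMR ID/mask layout carried in the fwspec */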
+ if (args->args_count > 0)
+ fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
+
+ if (args->args_count > 1)
+ fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
+ else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
+ fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
+
+ return iommu_fwspec_add_ids(dev, &fwid, 1);
+}
+
+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
+}
+
+static int arm_smmu_def_domain_type(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ const struct arm_smmu_impl *impl = cfg->smmu->impl;
+
+ if (using_legacy_binding)
+ return IOMMU_DOMAIN_IDENTITY;
+
+ if (impl && impl->def_domain_type)
+ return impl->def_domain_type(dev);
+
+ return 0;
+}
+
+static struct iommu_ops arm_smmu_ops = {
+ .capable = arm_smmu_capable,
+ .domain_alloc = arm_smmu_domain_alloc,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
+ .probe_finalize = arm_smmu_probe_finalize,
+ .device_group = arm_smmu_device_group,
+ .of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .def_domain_type = arm_smmu_def_domain_type,
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
+ .owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = arm_smmu_attach_dev,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
+ .iotlb_sync = arm_smmu_iotlb_sync,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
+ .free = arm_smmu_domain_free,
+ }
+};
+
+static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+{
+ int i;
+ u32 reg;
+
+ /* clear global FSR */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
+
+ /*
+ * Reset stream mapping groups: Initial values mark all SMRn as
+ * invalid and all S2CRn as bypass unless overridden.
+ */
+ for (i = 0; i < smmu->num_mapping_groups; ++i)
+ arm_smmu_write_sme(smmu, i);
+
+ /* Make sure all context banks are disabled and clear CB_FSR */
+ for (i = 0; i < smmu->num_context_banks; ++i) {
+ arm_smmu_write_context_bank(smmu, i);
+ arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
+ }
+
+ /* Invalidate the TLB, just in case */
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
+
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
+
+ /* Enable fault reporting */
+ reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
+ ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
+
+ /* Disable TLB broadcasting. */
+ reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
+
+ /* Enable client access, handling unmatched streams as appropriate */
+ reg &= ~ARM_SMMU_sCR0_CLIENTPD;
+ if (disable_bypass)
+ reg |= ARM_SMMU_sCR0_USFCFG;
+ else
+ reg &= ~ARM_SMMU_sCR0_USFCFG;
+
+ /* Disable forced broadcasting */
+ reg &= ~ARM_SMMU_sCR0_FB;
+
+ /* Don't upgrade barriers */
+ reg &= ~(ARM_SMMU_sCR0_BSU);
+
+ if (smmu->features & ARM_SMMU_FEAT_VMID16)
+ reg |= ARM_SMMU_sCR0_VMID16EN;
+
+ if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+ reg |= ARM_SMMU_sCR0_EXIDENABLE;
+
+ if (smmu->impl && smmu->impl->reset)
+ smmu->impl->reset(smmu);
+
+ /* Push the button */
+ arm_smmu_tlb_sync_global(smmu);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
+}
+
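+/* Decode the address size field encoding from the ID registers into a width in bits */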
+static int arm_smmu_id_size_to_bits(int size)
+{
+ switch (size) {
+ case 0:
+ return 32;
+ case 1:
+ return 36;
+ case 2:
+ return 40;
+ case 3:
+ return 42;
+ case 4:
+ return 44;
+ case 5:
+ default:
+ return 48;
+ }
+}
+
+static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+{
+ unsigned int size;
+ u32 id;
+ bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
+ int i, ret;
+
+ dev_notice(smmu->dev, "probing hardware configuration...\n");
+ dev_notice(smmu->dev, "SMMUv%d with:\n",
+ smmu->version == ARM_SMMU_V2 ? 2 : 1);
+
+ /* ID0 */
+ id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
+
+ /* Restrict available stages based on module parameter */
+ if (force_stage == 1)
+ id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
+ else if (force_stage == 2)
+ id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
+
+ if (id & ARM_SMMU_ID0_S1TS) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
+ dev_notice(smmu->dev, "\tstage 1 translation\n");
+ }
+
+ if (id & ARM_SMMU_ID0_S2TS) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
+ dev_notice(smmu->dev, "\tstage 2 translation\n");
+ }
+
+ if (id & ARM_SMMU_ID0_NTS) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
+ dev_notice(smmu->dev, "\tnested translation\n");
+ }
+
+ if (!(smmu->features &
+ (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
+ dev_err(smmu->dev, "\tno translation support!\n");
+ return -ENODEV;
+ }
+
+ if ((id & ARM_SMMU_ID0_S1TS) &&
+ ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
+ dev_notice(smmu->dev, "\taddress translation ops\n");
+ }
+
+ /*
+ * In order for DMA API calls to work properly, we must defer to what
+ * the FW says about coherency, regardless of what the hardware claims.
+ * Fortunately, this also opens up a workaround for systems where the
+ * ID register value has ended up configured incorrectly.
+ */
+ cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
+ if (cttw_fw || cttw_reg)
+ dev_notice(smmu->dev, "\t%scoherent table walk\n",
+ cttw_fw ? "" : "non-");
+ if (cttw_fw != cttw_reg)
+ dev_notice(smmu->dev,
+ "\t(IDR0.CTTW overridden by FW configuration)\n");
+
+ /* Max. number of entries we have for stream matching/indexing */
+ if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
+ smmu->features |= ARM_SMMU_FEAT_EXIDS;
+ size = 1 << 16;
+ } else {
+ size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
+ }
+ smmu->streamid_mask = size - 1;
+ if (id & ARM_SMMU_ID0_SMS) {
+ smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
+ size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
+ if (size == 0) {
+ dev_err(smmu->dev,
+ "stream-matching supported, but no SMRs present!\n");
+ return -ENODEV;
+ }
+
+ /* Zero-initialised to mark as invalid */
+ smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
+ GFP_KERNEL);
+ if (!smmu->smrs)
+ return -ENOMEM;
+
+ dev_notice(smmu->dev,
+ "\tstream matching with %u register groups", size);
+ }
+ /* s2cr->type == 0 means translation, so initialise explicitly */
+ smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
+ GFP_KERNEL);
+ if (!smmu->s2crs)
+ return -ENOMEM;
+ for (i = 0; i < size; i++)
+ smmu->s2crs[i] = s2cr_init_val;
+
+ smmu->num_mapping_groups = size;
+ mutex_init(&smmu->stream_map_mutex);
+ spin_lock_init(&smmu->global_sync_lock);
+
+ if (smmu->version < ARM_SMMU_V2 ||
+ !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
+ if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
+ }
+
+ /* ID1 */
+ id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
+ smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
+
+ /* Check for size mismatch of SMMU address space from mapped region */
+ size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
+ if (smmu->numpage != 2 * size << smmu->pgshift)
+ dev_warn(smmu->dev,
+ "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
+ 2 * size << smmu->pgshift, smmu->numpage);
+ /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
+ smmu->numpage = size;
+
+ smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
+ smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
+ if (smmu->num_s2_context_banks > smmu->num_context_banks) {
+ dev_err(smmu->dev, "impossible number of S2 context banks!\n");
+ return -ENODEV;
+ }
+ dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
+ smmu->num_context_banks, smmu->num_s2_context_banks);
+ smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
+ sizeof(*smmu->cbs), GFP_KERNEL);
+ if (!smmu->cbs)
+ return -ENOMEM;
+
+ /* ID2 */
+ id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
+ size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
+ smmu->ipa_size = size;
+
+ /* The output mask is also applied for bypass */
+ size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
+ smmu->pa_size = size;
+
+ if (id & ARM_SMMU_ID2_VMID16)
+ smmu->features |= ARM_SMMU_FEAT_VMID16;
+
+ /*
+ * What the page table walker can address actually depends on which
+ * descriptor format is in use, but since a) we don't know that yet,
+ * and b) it can vary per context bank, this will have to do...
+ */
+ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
+ dev_warn(smmu->dev,
+ "failed to set DMA mask for table walker\n");
+
+ if (smmu->version < ARM_SMMU_V2) {
+ smmu->va_size = smmu->ipa_size;
+ if (smmu->version == ARM_SMMU_V1_64K)
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
+ } else {
+ size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
+ smmu->va_size = arm_smmu_id_size_to_bits(size);
+ if (id & ARM_SMMU_ID2_PTFS_4K)
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
+ if (id & ARM_SMMU_ID2_PTFS_16K)
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
+ if (id & ARM_SMMU_ID2_PTFS_64K)
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
+ }
+
+ if (smmu->impl && smmu->impl->cfg_probe) {
+ ret = smmu->impl->cfg_probe(smmu);
+ if (ret)
+ return ret;
+ }
+
+ /* Now we've corralled the various formats, what'll it do? */
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
+ smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
+ if (smmu->features &
+ (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
+ smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
+ smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
+ smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
+
+ if (arm_smmu_ops.pgsize_bitmap == -1UL)
+ arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+ else
+ arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
+ smmu->pgsize_bitmap);
+
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+ dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+ smmu->va_size, smmu->ipa_size);
+
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
+ dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+ smmu->ipa_size, smmu->pa_size);
+
+ return 0;
+}
+
+struct arm_smmu_match_data {
+ enum arm_smmu_arch_version version;
+ enum arm_smmu_implementation model;
+};
+
+#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
+static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
+
+ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
+ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
+
+static const struct of_device_id arm_smmu_of_match[] = {
+ { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
+ { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
+ { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
+ { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
+ { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
+ { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
+ { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
+ { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+
+#ifdef CONFIG_ACPI
+static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
+{
+ int ret = 0;
+
+ switch (model) {
+ case ACPI_IORT_SMMU_V1:
+ case ACPI_IORT_SMMU_CORELINK_MMU400:
+ smmu->version = ARM_SMMU_V1;
+ smmu->model = GENERIC_SMMU;
+ break;
+ case ACPI_IORT_SMMU_CORELINK_MMU401:
+ smmu->version = ARM_SMMU_V1_64K;
+ smmu->model = GENERIC_SMMU;
+ break;
+ case ACPI_IORT_SMMU_V2:
+ smmu->version = ARM_SMMU_V2;
+ smmu->model = GENERIC_SMMU;
+ break;
+ case ACPI_IORT_SMMU_CORELINK_MMU500:
+ smmu->version = ARM_SMMU_V2;
+ smmu->model = ARM_MMU500;
+ break;
+ case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
+ smmu->version = ARM_SMMU_V2;
+ smmu->model = CAVIUM_SMMUV2;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
+{
+ struct device *dev = smmu->dev;
+ struct acpi_iort_node *node =
+ *(struct acpi_iort_node **)dev_get_platdata(dev);
+ struct acpi_iort_smmu *iort_smmu;
+ int ret;
+
+ /* Retrieve SMMU1/2 specific data */
+ iort_smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ ret = acpi_smmu_get_data(iort_smmu->model, smmu);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore the configuration access interrupt */
+ *global_irqs = 1;
+ *pmu_irqs = 0;
+
+ if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+ return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
+{
+ return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
+{
+ const struct arm_smmu_match_data *data;
+ struct device *dev = smmu->dev;
+ bool legacy_binding;
+
+ if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
+ return dev_err_probe(dev, -ENODEV,
+ "missing #global-interrupts property\n");
+ *pmu_irqs = 0;
+
+ data = of_device_get_match_data(dev);
+ smmu->version = data->version;
+ smmu->model = data->model;
+
+ legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
+ if (legacy_binding && !using_generic_binding) {
+ if (!using_legacy_binding) {
+ pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
+ IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
+ }
+ using_legacy_binding = true;
+ } else if (!legacy_binding && !using_legacy_binding) {
+ using_generic_binding = true;
+ } else {
+ dev_err(dev, "not probing due to mismatched DT properties\n");
+ return -ENODEV;
+ }
+
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+ return 0;
+}
+
+static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+ int idx, cnt = 0;
+ u32 reg;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ /*
+ * Rather than trying to look at existing mappings that
+	 * are set up by the firmware and then invalidate the ones
+	 * that do not have matching RMR entries, just disable the
+ * SMMU until it gets enabled again in the reset routine.
+ */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
+ reg |= ARM_SMMU_sCR0_CLIENTPD;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ struct iommu_iort_rmr_data *rmr;
+ int i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
+ if (idx < 0)
+ continue;
+
+ if (smmu->s2crs[idx].count == 0) {
+ smmu->smrs[idx].id = rmr->sids[i];
+ smmu->smrs[idx].mask = 0;
+ smmu->smrs[idx].valid = true;
+ }
+ smmu->s2crs[idx].count++;
+ smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
+
+ cnt++;
+ }
+ }
+
+ dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
+ cnt == 1 ? "" : "s");
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ int num_irqs, i, err;
+ u32 global_irqs, pmu_irqs;
+ irqreturn_t (*global_fault)(int irq, void *dev);
+
+ smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+ if (!smmu) {
+ dev_err(dev, "failed to allocate arm_smmu_device\n");
+ return -ENOMEM;
+ }
+ smmu->dev = dev;
+
+ if (dev->of_node)
+ err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
+ else
+ err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
+ if (err)
+ return err;
+
+ smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(smmu->base))
+ return PTR_ERR(smmu->base);
+ smmu->ioaddr = res->start;
+
+ /*
+ * The resource size should effectively match the value of SMMU_TOP;
+ * stash that temporarily until we know PAGESIZE to validate it with.
+ */
+ smmu->numpage = resource_size(res);
+
+ smmu = arm_smmu_impl_init(smmu);
+ if (IS_ERR(smmu))
+ return PTR_ERR(smmu);
+
+ num_irqs = platform_irq_count(pdev);
+
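+	/* Whatever interrupts remain after the global and PMU ones are per-context */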
+ smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
+ if (smmu->num_context_irqs <= 0)
+ return dev_err_probe(dev, -ENODEV,
+ "found %d interrupts but expected at least %d\n",
+ num_irqs, global_irqs + pmu_irqs + 1);
+
+ smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
+ sizeof(*smmu->irqs), GFP_KERNEL);
+ if (!smmu->irqs)
+ return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
+ smmu->num_context_irqs);
+
+ for (i = 0; i < smmu->num_context_irqs; i++) {
+ int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);
+
+ if (irq < 0)
+ return irq;
+ smmu->irqs[i] = irq;
+ }
+
+ err = devm_clk_bulk_get_all(dev, &smmu->clks);
+ if (err < 0) {
+ dev_err(dev, "failed to get clocks %d\n", err);
+ return err;
+ }
+ smmu->num_clks = err;
+
+ err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
+ if (err)
+ return err;
+
+ err = arm_smmu_device_cfg_probe(smmu);
+ if (err)
+ return err;
+
+ if (smmu->version == ARM_SMMU_V2) {
+ if (smmu->num_context_banks > smmu->num_context_irqs) {
+ dev_err(dev,
+ "found only %d context irq(s) but %d required\n",
+ smmu->num_context_irqs, smmu->num_context_banks);
+ return -ENODEV;
+ }
+
+ /* Ignore superfluous interrupts */
+ smmu->num_context_irqs = smmu->num_context_banks;
+ }
+
+ if (smmu->impl && smmu->impl->global_fault)
+ global_fault = smmu->impl->global_fault;
+ else
+ global_fault = arm_smmu_global_fault;
+
+ for (i = 0; i < global_irqs; i++) {
+ int irq = platform_get_irq(pdev, i);
+
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
+ "arm-smmu global fault", smmu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to request global IRQ %d (%u)\n",
+ i, irq);
+ }
+
+ err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
+ "smmu.%pa", &smmu->ioaddr);
+ if (err) {
+ dev_err(dev, "Failed to register iommu in sysfs\n");
+ return err;
+ }
+
+ err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+ if (err) {
+ dev_err(dev, "Failed to register iommu\n");
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
+ arm_smmu_device_reset(smmu);
+ arm_smmu_test_smr_masks(smmu);
+
+ /*
+ * We want to avoid touching dev->power.lock in fastpaths unless
+ * it's really going to do something useful - pm_runtime_enabled()
+ * can serve as an ideal proxy for that decision. So, conditionally
+ * enable pm_runtime.
+ */
+ if (dev->pm_domain) {
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+ if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
+ dev_notice(&pdev->dev, "disabling translation\n");
+
+ arm_smmu_rpm_get(smmu);
+ /* Turn the thing off */
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
+ arm_smmu_rpm_put(smmu);
+
+ if (pm_runtime_enabled(smmu->dev))
+ pm_runtime_force_suspend(smmu->dev);
+ else
+ clk_bulk_disable(smmu->num_clks, smmu->clks);
+
+ clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+}
+
+static void arm_smmu_device_remove(struct platform_device *pdev)
+{
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+ iommu_device_unregister(&smmu->iommu);
+ iommu_device_sysfs_remove(&smmu->iommu);
+
+ arm_smmu_device_shutdown(pdev);
+}
+
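+/* Runtime PM only gates the already-prepared clocks; system sleep additionally unprepares them */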
+static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
+{
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
+ if (ret)
+ return ret;
+
+ arm_smmu_device_reset(smmu);
+
+ return 0;
+}
+
+static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
+{
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+
+ clk_bulk_disable(smmu->num_clks, smmu->clks);
+
+ return 0;
+}
+
+static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
+{
+ int ret;
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+
+ ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
+ if (ret)
+ return ret;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = arm_smmu_runtime_resume(dev);
+ if (ret)
+ clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+
+ return ret;
+}
+
+static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+
+ if (pm_runtime_suspended(dev))
+ goto clk_unprepare;
+
+ ret = arm_smmu_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+clk_unprepare:
+ clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+ return ret;
+}
+
+static const struct dev_pm_ops arm_smmu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
+ SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
+ arm_smmu_runtime_resume, NULL)
+};
+
+static struct platform_driver arm_smmu_driver = {
+ .driver = {
+ .name = "arm-smmu",
+ .of_match_table = arm_smmu_of_match,
+ .pm = &arm_smmu_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = arm_smmu_device_probe,
+ .remove_new = arm_smmu_device_remove,
+ .shutdown = arm_smmu_device_shutdown,
+};
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
new file mode 100644
index 0000000000..703fd5817e
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _ARM_SMMU_H
+#define _ARM_SMMU_H
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0 0x0
+#define ARM_SMMU_sCR0_VMID16EN BIT(31)
+#define ARM_SMMU_sCR0_BSU GENMASK(15, 14)
+#define ARM_SMMU_sCR0_FB BIT(13)
+#define ARM_SMMU_sCR0_PTM BIT(12)
+#define ARM_SMMU_sCR0_VMIDPNE BIT(11)
+#define ARM_SMMU_sCR0_USFCFG BIT(10)
+#define ARM_SMMU_sCR0_GCFGFIE BIT(5)
+#define ARM_SMMU_sCR0_GCFGFRE BIT(4)
+#define ARM_SMMU_sCR0_EXIDENABLE BIT(3)
+#define ARM_SMMU_sCR0_GFIE BIT(2)
+#define ARM_SMMU_sCR0_GFRE BIT(1)
+#define ARM_SMMU_sCR0_CLIENTPD BIT(0)
+
+/* Auxiliary Configuration register */
+#define ARM_SMMU_GR0_sACR 0x10
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0 0x20
+#define ARM_SMMU_ID0_S1TS BIT(30)
+#define ARM_SMMU_ID0_S2TS BIT(29)
+#define ARM_SMMU_ID0_NTS BIT(28)
+#define ARM_SMMU_ID0_SMS BIT(27)
+#define ARM_SMMU_ID0_ATOSNS BIT(26)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32 BIT(25)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32S BIT(24)
+#define ARM_SMMU_ID0_NUMIRPT GENMASK(23, 16)
+#define ARM_SMMU_ID0_CTTW BIT(14)
+#define ARM_SMMU_ID0_NUMSIDB GENMASK(12, 9)
+#define ARM_SMMU_ID0_EXIDS BIT(8)
+#define ARM_SMMU_ID0_NUMSMRG GENMASK(7, 0)
+
+#define ARM_SMMU_GR0_ID1 0x24
+#define ARM_SMMU_ID1_PAGESIZE BIT(31)
+#define ARM_SMMU_ID1_NUMPAGENDXB GENMASK(30, 28)
+#define ARM_SMMU_ID1_NUMS2CB GENMASK(23, 16)
+#define ARM_SMMU_ID1_NUMCB GENMASK(7, 0)
+
+#define ARM_SMMU_GR0_ID2 0x28
+#define ARM_SMMU_ID2_VMID16 BIT(15)
+#define ARM_SMMU_ID2_PTFS_64K BIT(14)
+#define ARM_SMMU_ID2_PTFS_16K BIT(13)
+#define ARM_SMMU_ID2_PTFS_4K BIT(12)
+#define ARM_SMMU_ID2_UBS GENMASK(11, 8)
+#define ARM_SMMU_ID2_OAS GENMASK(7, 4)
+#define ARM_SMMU_ID2_IAS GENMASK(3, 0)
+
+#define ARM_SMMU_GR0_ID3 0x2c
+#define ARM_SMMU_GR0_ID4 0x30
+#define ARM_SMMU_GR0_ID5 0x34
+#define ARM_SMMU_GR0_ID6 0x38
+
+#define ARM_SMMU_GR0_ID7 0x3c
+#define ARM_SMMU_ID7_MAJOR GENMASK(7, 4)
+#define ARM_SMMU_ID7_MINOR GENMASK(3, 0)
+
+#define ARM_SMMU_GR0_sGFSR 0x48
+#define ARM_SMMU_sGFSR_USF BIT(1)
+
+#define ARM_SMMU_GR0_sGFSYNR0 0x50
+#define ARM_SMMU_GR0_sGFSYNR1 0x54
+#define ARM_SMMU_GR0_sGFSYNR2 0x58
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_TLBIVMID 0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
+#define ARM_SMMU_GR0_TLBIALLH 0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC 0x70
+
+#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
+#define ARM_SMMU_sTLBGSTATUS_GSACTIVE BIT(0)
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
+#define ARM_SMMU_SMR_VALID BIT(31)
+#define ARM_SMMU_SMR_MASK GENMASK(31, 16)
+#define ARM_SMMU_SMR_ID GENMASK(15, 0)
+
+#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
+#define ARM_SMMU_S2CR_PRIVCFG GENMASK(25, 24)
+enum arm_smmu_s2cr_privcfg {
+ S2CR_PRIVCFG_DEFAULT,
+ S2CR_PRIVCFG_DIPAN,
+ S2CR_PRIVCFG_UNPRIV,
+ S2CR_PRIVCFG_PRIV,
+};
+#define ARM_SMMU_S2CR_TYPE GENMASK(17, 16)
+enum arm_smmu_s2cr_type {
+ S2CR_TYPE_TRANS,
+ S2CR_TYPE_BYPASS,
+ S2CR_TYPE_FAULT,
+};
+#define ARM_SMMU_S2CR_EXIDVALID BIT(10)
+#define ARM_SMMU_S2CR_CBNDX GENMASK(7, 0)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
+#define ARM_SMMU_CBAR_IRPTNDX GENMASK(31, 24)
+#define ARM_SMMU_CBAR_TYPE GENMASK(17, 16)
+enum arm_smmu_cbar_type {
+ CBAR_TYPE_S2_TRANS,
+ CBAR_TYPE_S1_TRANS_S2_BYPASS,
+ CBAR_TYPE_S1_TRANS_S2_FAULT,
+ CBAR_TYPE_S1_TRANS_S2_TRANS,
+};
+#define ARM_SMMU_CBAR_S1_MEMATTR GENMASK(15, 12)
+#define ARM_SMMU_CBAR_S1_MEMATTR_WB 0xf
+#define ARM_SMMU_CBAR_S1_BPSHCFG GENMASK(9, 8)
+#define ARM_SMMU_CBAR_S1_BPSHCFG_NSH 3
+#define ARM_SMMU_CBAR_VMID GENMASK(7, 0)
+
+#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
+
+#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
+#define ARM_SMMU_CBA2R_VMID16 GENMASK(31, 16)
+#define ARM_SMMU_CBA2R_VA64 BIT(0)
+
+#define ARM_SMMU_CB_SCTLR 0x0
+#define ARM_SMMU_SCTLR_S1_ASIDPNE BIT(12)
+#define ARM_SMMU_SCTLR_CFCFG BIT(7)
+#define ARM_SMMU_SCTLR_HUPCF BIT(8)
+#define ARM_SMMU_SCTLR_CFIE BIT(6)
+#define ARM_SMMU_SCTLR_CFRE BIT(5)
+#define ARM_SMMU_SCTLR_E BIT(4)
+#define ARM_SMMU_SCTLR_AFE BIT(2)
+#define ARM_SMMU_SCTLR_TRE BIT(1)
+#define ARM_SMMU_SCTLR_M BIT(0)
+
+#define ARM_SMMU_CB_ACTLR 0x4
+
+#define ARM_SMMU_CB_RESUME 0x8
+#define ARM_SMMU_RESUME_TERMINATE BIT(0)
+
+#define ARM_SMMU_CB_TCR2 0x10
+#define ARM_SMMU_TCR2_SEP GENMASK(17, 15)
+#define ARM_SMMU_TCR2_SEP_UPSTREAM 0x7
+#define ARM_SMMU_TCR2_AS BIT(4)
+#define ARM_SMMU_TCR2_PASIZE GENMASK(3, 0)
+
+#define ARM_SMMU_CB_TTBR0 0x20
+#define ARM_SMMU_CB_TTBR1 0x28
+#define ARM_SMMU_TTBRn_ASID GENMASK_ULL(63, 48)
+
+#define ARM_SMMU_CB_TCR 0x30
+#define ARM_SMMU_TCR_EAE BIT(31)
+#define ARM_SMMU_TCR_EPD1 BIT(23)
+#define ARM_SMMU_TCR_A1 BIT(22)
+#define ARM_SMMU_TCR_TG0 GENMASK(15, 14)
+#define ARM_SMMU_TCR_SH0 GENMASK(13, 12)
+#define ARM_SMMU_TCR_ORGN0 GENMASK(11, 10)
+#define ARM_SMMU_TCR_IRGN0 GENMASK(9, 8)
+#define ARM_SMMU_TCR_EPD0 BIT(7)
+#define ARM_SMMU_TCR_T0SZ GENMASK(5, 0)
+
+#define ARM_SMMU_VTCR_RES1 BIT(31)
+#define ARM_SMMU_VTCR_PS GENMASK(18, 16)
+#define ARM_SMMU_VTCR_TG0 ARM_SMMU_TCR_TG0
+#define ARM_SMMU_VTCR_SH0 ARM_SMMU_TCR_SH0
+#define ARM_SMMU_VTCR_ORGN0 ARM_SMMU_TCR_ORGN0
+#define ARM_SMMU_VTCR_IRGN0 ARM_SMMU_TCR_IRGN0
+#define ARM_SMMU_VTCR_SL0 GENMASK(7, 6)
+#define ARM_SMMU_VTCR_T0SZ ARM_SMMU_TCR_T0SZ
+
+#define ARM_SMMU_CB_CONTEXTIDR 0x34
+#define ARM_SMMU_CB_S1_MAIR0 0x38
+#define ARM_SMMU_CB_S1_MAIR1 0x3c
+
+#define ARM_SMMU_CB_PAR 0x50
+#define ARM_SMMU_CB_PAR_F BIT(0)
+
+#define ARM_SMMU_CB_FSR 0x58
+#define ARM_SMMU_FSR_MULTI BIT(31)
+#define ARM_SMMU_FSR_SS BIT(30)
+#define ARM_SMMU_FSR_UUT BIT(8)
+#define ARM_SMMU_FSR_ASF BIT(7)
+#define ARM_SMMU_FSR_TLBLKF BIT(6)
+#define ARM_SMMU_FSR_TLBMCF BIT(5)
+#define ARM_SMMU_FSR_EF BIT(4)
+#define ARM_SMMU_FSR_PF BIT(3)
+#define ARM_SMMU_FSR_AFF BIT(2)
+#define ARM_SMMU_FSR_TF BIT(1)
+
+#define ARM_SMMU_FSR_IGN (ARM_SMMU_FSR_AFF | \
+ ARM_SMMU_FSR_ASF | \
+ ARM_SMMU_FSR_TLBMCF | \
+ ARM_SMMU_FSR_TLBLKF)
+
+#define ARM_SMMU_FSR_FAULT (ARM_SMMU_FSR_MULTI | \
+ ARM_SMMU_FSR_SS | \
+ ARM_SMMU_FSR_UUT | \
+ ARM_SMMU_FSR_EF | \
+ ARM_SMMU_FSR_PF | \
+ ARM_SMMU_FSR_TF | \
+ ARM_SMMU_FSR_IGN)
+
+#define ARM_SMMU_CB_FAR 0x60
+
+#define ARM_SMMU_CB_FSYNR0 0x68
+#define ARM_SMMU_FSYNR0_WNR BIT(4)
+
+#define ARM_SMMU_CB_FSYNR1 0x6c
+
+#define ARM_SMMU_CB_S1_TLBIVA 0x600
+#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIVAL 0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_TLBSYNC 0x7f0
+#define ARM_SMMU_CB_TLBSTATUS 0x7f4
+#define ARM_SMMU_CB_ATS1PR 0x800
+
+#define ARM_SMMU_CB_ATSR 0x8f0
+#define ARM_SMMU_ATSR_ACTIVE BIT(0)
+
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS 128
+
+#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
+#define TLB_SPIN_COUNT 10
+
+/* Shared driver definitions */
+enum arm_smmu_arch_version {
+ ARM_SMMU_V1,
+ ARM_SMMU_V1_64K,
+ ARM_SMMU_V2,
+};
+
+enum arm_smmu_implementation {
+ GENERIC_SMMU,
+ ARM_MMU500,
+ CAVIUM_SMMUV2,
+ QCOM_SMMUV2,
+};
+
+struct arm_smmu_s2cr {
+ struct iommu_group *group;
+ int count;
+ enum arm_smmu_s2cr_type type;
+ enum arm_smmu_s2cr_privcfg privcfg;
+ u8 cbndx;
+};
+
+struct arm_smmu_smr {
+ u16 mask;
+ u16 id;
+ bool valid;
+ bool pinned;
+};
+
+struct arm_smmu_device {
+ struct device *dev;
+
+ void __iomem *base;
+ phys_addr_t ioaddr;
+ unsigned int numpage;
+ unsigned int pgshift;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
+#define ARM_SMMU_FEAT_VMID16 (1 << 6)
+#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
+#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
+#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
+#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
+#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
+#define ARM_SMMU_FEAT_EXIDS (1 << 12)
+ u32 features;
+
+ enum arm_smmu_arch_version version;
+ enum arm_smmu_implementation model;
+ const struct arm_smmu_impl *impl;
+
+ u32 num_context_banks;
+ u32 num_s2_context_banks;
+ DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+ struct arm_smmu_cb *cbs;
+ atomic_t irptndx;
+
+ u32 num_mapping_groups;
+ u16 streamid_mask;
+ u16 smr_mask_mask;
+ struct arm_smmu_smr *smrs;
+ struct arm_smmu_s2cr *s2crs;
+ struct mutex stream_map_mutex;
+
+ unsigned long va_size;
+ unsigned long ipa_size;
+ unsigned long pa_size;
+ unsigned long pgsize_bitmap;
+
+ int num_context_irqs;
+ int num_clks;
+ unsigned int *irqs;
+ struct clk_bulk_data *clks;
+
+ spinlock_t global_sync_lock;
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
+};
+
+enum arm_smmu_context_fmt {
+ ARM_SMMU_CTX_FMT_NONE,
+ ARM_SMMU_CTX_FMT_AARCH64,
+ ARM_SMMU_CTX_FMT_AARCH32_L,
+ ARM_SMMU_CTX_FMT_AARCH32_S,
+};
+
+struct arm_smmu_cfg {
+ u8 cbndx;
+ u8 irptndx;
+ union {
+ u16 asid;
+ u16 vmid;
+ };
+ enum arm_smmu_cbar_type cbar;
+ enum arm_smmu_context_fmt fmt;
+ bool flush_walk_prefer_tlbiasid;
+};
+#define ARM_SMMU_INVALID_IRPTNDX 0xff
+
+struct arm_smmu_cb {
+ u64 ttbr[2];
+ u32 tcr[2];
+ u32 mair[2];
+ struct arm_smmu_cfg *cfg;
+};
+
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
+};
+
+struct arm_smmu_domain {
+ struct arm_smmu_device *smmu;
+ struct io_pgtable_ops *pgtbl_ops;
+ unsigned long pgtbl_quirks;
+ const struct iommu_flush_ops *flush_ops;
+ struct arm_smmu_cfg cfg;
+ enum arm_smmu_domain_stage stage;
+ struct mutex init_mutex; /* Protects smmu pointer */
+ spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
+ struct iommu_domain domain;
+};
+
+struct arm_smmu_master_cfg {
+ struct arm_smmu_device *smmu;
+ s16 smendx[];
+};
+
+static inline u32 arm_smmu_lpae_tcr(const struct io_pgtable_cfg *cfg)
+{
+ u32 tcr = FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
+ FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
+ FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
+ FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
+ FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
+
+ /*
+	 * When TTBR1 is selected, shift the TCR fields by 16 bits and disable
+ * translation in TTBR0
+ */
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+ tcr = (tcr << 16) & ~ARM_SMMU_TCR_A1;
+ tcr |= ARM_SMMU_TCR_EPD0;
+ } else
+ tcr |= ARM_SMMU_TCR_EPD1;
+
+ return tcr;
+}
+
+static inline u32 arm_smmu_lpae_tcr2(const struct io_pgtable_cfg *cfg)
+{
+ return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) |
+ FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM);
+}
+
+static inline u32 arm_smmu_lpae_vtcr(const struct io_pgtable_cfg *cfg)
+{
+ return ARM_SMMU_VTCR_RES1 |
+ FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) |
+ FIELD_PREP(ARM_SMMU_VTCR_TG0, cfg->arm_lpae_s2_cfg.vtcr.tg) |
+ FIELD_PREP(ARM_SMMU_VTCR_SH0, cfg->arm_lpae_s2_cfg.vtcr.sh) |
+ FIELD_PREP(ARM_SMMU_VTCR_ORGN0, cfg->arm_lpae_s2_cfg.vtcr.orgn) |
+ FIELD_PREP(ARM_SMMU_VTCR_IRGN0, cfg->arm_lpae_s2_cfg.vtcr.irgn) |
+ FIELD_PREP(ARM_SMMU_VTCR_SL0, cfg->arm_lpae_s2_cfg.vtcr.sl) |
+ FIELD_PREP(ARM_SMMU_VTCR_T0SZ, cfg->arm_lpae_s2_cfg.vtcr.tsz);
+}
+
+/* Implementation details, yay! */
+struct arm_smmu_impl {
+ u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
+ void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
+ u32 val);
+ u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
+ void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
+ u64 val);
+ int (*cfg_probe)(struct arm_smmu_device *smmu);
+ int (*reset)(struct arm_smmu_device *smmu);
+ int (*init_context)(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *cfg, struct device *dev);
+ void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
+ int status);
+ int (*def_domain_type)(struct device *dev);
+ irqreturn_t (*global_fault)(int irq, void *dev);
+ irqreturn_t (*context_fault)(int irq, void *dev);
+ int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev, int start);
+ void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
+ void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg);
+ void (*probe_finalize)(struct arm_smmu_device *smmu, struct device *dev);
+};
+
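+/* Iterate over a master's stream-map entries; indices beyond num_ids read as INVALID_SMENDX */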
+#define INVALID_SMENDX -1
+#define cfg_smendx(cfg, fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
+#define for_each_cfg_sme(cfg, fw, i, idx) \
+ for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
+
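+/* Claim the first free bit in [start, end) without a lock; returns -ENOSPC when the map is full */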
+static inline int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
+
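+/* Translate a register page number into its MMIO address; each page is (1 << pgshift) bytes */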
+static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
+{
+ return smmu->base + (n << smmu->pgshift);
+}
+
+static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
+{
+ if (smmu->impl && unlikely(smmu->impl->read_reg))
+ return smmu->impl->read_reg(smmu, page, offset);
+ return readl_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page,
+ int offset, u32 val)
+{
+ if (smmu->impl && unlikely(smmu->impl->write_reg))
+ smmu->impl->write_reg(smmu, page, offset, val);
+ else
+ writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+static inline u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
+{
+ if (smmu->impl && unlikely(smmu->impl->read_reg64))
+ return smmu->impl->read_reg64(smmu, page, offset);
+ return readq_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
+ int offset, u64 val)
+{
+ if (smmu->impl && unlikely(smmu->impl->write_reg64))
+ smmu->impl->write_reg64(smmu, page, offset, val);
+ else
+ writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
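+/* Register page indices: the global pages come first, context banks start at page numpage */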
+#define ARM_SMMU_GR0 0
+#define ARM_SMMU_GR1 1
+#define ARM_SMMU_CB(s, n) ((s)->numpage + (n))
+
+#define arm_smmu_gr0_read(s, o) \
+ arm_smmu_readl((s), ARM_SMMU_GR0, (o))
+#define arm_smmu_gr0_write(s, o, v) \
+ arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v))
+
+#define arm_smmu_gr1_read(s, o) \
+ arm_smmu_readl((s), ARM_SMMU_GR1, (o))
+#define arm_smmu_gr1_write(s, o, v) \
+ arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v))
+
+#define arm_smmu_cb_read(s, n, o) \
+ arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o))
+#define arm_smmu_cb_write(s, n, o, v) \
+ arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v))
+#define arm_smmu_cb_readq(s, n, o) \
+ arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o))
+#define arm_smmu_cb_writeq(s, n, o, v) \
+ arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
+
+struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+
+void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
+int arm_mmu500_reset(struct arm_smmu_device *smmu);
+
+#endif /* _ARM_SMMU_H */
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
new file mode 100644
index 0000000000..775a3cbaff
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c
+ *
+ * Copyright (C) 2013 ARM Limited
+ * Copyright (C) 2017 Red Hat
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/kconfig.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "arm-smmu.h"
+
+#define SMMU_INTR_SEL_NS 0x2000
+
+enum qcom_iommu_clk {
+ CLK_IFACE,
+ CLK_BUS,
+ CLK_TBU,
+ CLK_NUM,
+};
+
+struct qcom_iommu_ctx;
+
+struct qcom_iommu_dev {
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
+ struct device *dev;
+ struct clk_bulk_data clks[CLK_NUM];
+ void __iomem *local_base;
+ u32 sec_id;
+ u8 max_asid;
+ struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */
+};
+
+struct qcom_iommu_ctx {
+ struct device *dev;
+ void __iomem *base;
+ bool secure_init;
+ bool secured_ctx;
+ u8 asid; /* asid and ctx bank # are 1:1 */
+ struct iommu_domain *domain;
+};
+
+struct qcom_iommu_domain {
+ struct io_pgtable_ops *pgtbl_ops;
+ spinlock_t pgtbl_lock;
+ struct mutex init_mutex; /* Protects iommu pointer */
+ struct iommu_domain domain;
+ struct qcom_iommu_dev *iommu;
+ struct iommu_fwspec *fwspec;
+};
+
+static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct qcom_iommu_domain, domain);
+}
+
+static const struct iommu_ops qcom_iommu_ops;
+
+static struct qcom_iommu_dev *to_iommu(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (!fwspec || fwspec->ops != &qcom_iommu_ops)
+ return NULL;
+
+ return dev_iommu_priv_get(dev);
+}
+
+static struct qcom_iommu_ctx *to_ctx(struct qcom_iommu_domain *d, unsigned asid)
+{
+ struct qcom_iommu_dev *qcom_iommu = d->iommu;
+ if (!qcom_iommu)
+ return NULL;
+ return qcom_iommu->ctxs[asid];
+}
+
+static inline void
+iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
+{
+ writel_relaxed(val, ctx->base + reg);
+}
+
+static inline void
+iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
+{
+ writeq_relaxed(val, ctx->base + reg);
+}
+
+static inline u32
+iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
+{
+ return readl_relaxed(ctx->base + reg);
+}
+
+static inline u64
+iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
+{
+ return readq_relaxed(ctx->base + reg);
+}
+
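+/* Issue a TLBSYNC for each context in the domain and spin until TLBSTATUS reports completion */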
+static void qcom_iommu_tlb_sync(void *cookie)
+{
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
+ unsigned i;
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
+		unsigned int val;
+		int ret;
+
+ iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
+
+ ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
+ (val & 0x1) == 0, 0, 5000000);
+ if (ret)
+ dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
+ }
+}
+
+static void qcom_iommu_tlb_inv_context(void *cookie)
+{
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
+ unsigned i;
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
+ iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
+ }
+
+ qcom_iommu_tlb_sync(cookie);
+}
+
+static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
+ unsigned i, reg;
+
+ reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
+ size_t s = size;
+
+ iova = (iova >> 12) << 12;
+ iova |= ctx->asid;
+ do {
+ iommu_writel(ctx, reg, iova);
+ iova += granule;
+ } while (s -= granule);
+ }
+}
+
+static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
+ qcom_iommu_tlb_sync(cookie);
+}
+
+static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops qcom_flush_ops = {
+ .tlb_flush_all = qcom_iommu_tlb_inv_context,
+ .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
+ .tlb_add_page = qcom_iommu_tlb_add_page,
+};
+
+static irqreturn_t qcom_iommu_fault(int irq, void *dev)
+{
+ struct qcom_iommu_ctx *ctx = dev;
+ u32 fsr, fsynr;
+ u64 iova;
+
+ fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
+
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
+ iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);
+
+ if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
+ dev_err_ratelimited(ctx->dev,
+ "Unhandled context fault: fsr=0x%x, "
+ "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
+ fsr, iova, fsynr, ctx->asid);
+ }
+
+ iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
+ iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_iommu_init_domain(struct iommu_domain *domain,
+ struct qcom_iommu_dev *qcom_iommu,
+ struct device *dev)
+{
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct io_pgtable_ops *pgtbl_ops;
+ struct io_pgtable_cfg pgtbl_cfg;
+ int i, ret = 0;
+ u32 reg;
+
+ mutex_lock(&qcom_domain->init_mutex);
+ if (qcom_domain->iommu)
+ goto out_unlock;
+
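+	/* Fixed stage-1 LPAE configuration: 32-bit input and 40-bit output addresses */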
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
+ .ias = 32,
+ .oas = 40,
+ .tlb = &qcom_flush_ops,
+ .iommu_dev = qcom_iommu->dev,
+ };
+
+ qcom_domain->iommu = qcom_iommu;
+ qcom_domain->fwspec = fwspec;
+
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
+ if (!pgtbl_ops) {
+ dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
+ ret = -ENOMEM;
+ goto out_clear_iommu;
+ }
+
+ /* Update the domain's page sizes to reflect the page table format */
+ domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
+ domain->geometry.force_aperture = true;
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
+
+ if (!ctx->secure_init) {
+ ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
+ if (ret) {
+ dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
+ goto out_clear_iommu;
+ }
+ ctx->secure_init = true;
+ }
+
+ /* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */
+ if (ctx->secured_ctx) {
+ ctx->domain = domain;
+ continue;
+ }
+
+ /* Disable context bank before programming */
+ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+		/* Clear context bank fault address and fault status registers */
+ iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
+ iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
+
+ /* TTBRs */
+ iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
+ pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+ FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
+ iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);
+
+ /* TCR */
+ iommu_writel(ctx, ARM_SMMU_CB_TCR2,
+ arm_smmu_lpae_tcr2(&pgtbl_cfg));
+ iommu_writel(ctx, ARM_SMMU_CB_TCR,
+ arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);
+
+ /* MAIRs (stage-1 only) */
+ iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
+ pgtbl_cfg.arm_lpae_s1_cfg.mair);
+ iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
+ pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
+
+ /* SCTLR */
+ reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
+ ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
+ ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
+ ARM_SMMU_SCTLR_CFCFG;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ reg |= ARM_SMMU_SCTLR_E;
+
+ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
+
+ ctx->domain = domain;
+ }
+
+ mutex_unlock(&qcom_domain->init_mutex);
+
+ /* Publish page table ops for map/unmap */
+ qcom_domain->pgtbl_ops = pgtbl_ops;
+
+ return 0;
+
+out_clear_iommu:
+ qcom_domain->iommu = NULL;
+out_unlock:
+ mutex_unlock(&qcom_domain->init_mutex);
+ return ret;
+}
+
+static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
+{
+ struct qcom_iommu_domain *qcom_domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ return NULL;
+ /*
+ * Allocate the domain and initialise some of its data structures.
+ * We can't really do anything meaningful until we've added a
+ * master.
+ */
+ qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
+ if (!qcom_domain)
+ return NULL;
+
+ mutex_init(&qcom_domain->init_mutex);
+ spin_lock_init(&qcom_domain->pgtbl_lock);
+
+ return &qcom_domain->domain;
+}
+
+static void qcom_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+
+ if (qcom_domain->iommu) {
+ /*
+ * NOTE: unmap can be called after client device is powered
+ * off, for example, with GPUs or anything involving dma-buf.
+ * So we cannot rely on the device_link. Make sure the IOMMU
+ * is on to avoid unclocked accesses in the TLB inv path:
+ */
+ pm_runtime_get_sync(qcom_domain->iommu->dev);
+ free_io_pgtable_ops(qcom_domain->pgtbl_ops);
+ pm_runtime_put_sync(qcom_domain->iommu->dev);
+ }
+
+ kfree(qcom_domain);
+}
+
+static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ int ret;
+
+ if (!qcom_iommu) {
+ dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
+ return -ENXIO;
+ }
+
+ /* Ensure that the domain is finalized */
+ pm_runtime_get_sync(qcom_iommu->dev);
+ ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
+ pm_runtime_put_sync(qcom_iommu->dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Sanity check the domain. We don't support domains across
+ * different IOMMUs.
+ */
+ if (qcom_domain->iommu != qcom_iommu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ int ret;
+ unsigned long flags;
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;
+
+ if (!ops)
+ return -ENODEV;
+
+ spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);
+ spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
+ return ret;
+}
+
+static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ size_t ret;
+ unsigned long flags;
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ /* NOTE: unmap can be called after client device is powered off,
+ * for example, with GPUs or anything involving dma-buf. So we
+ * cannot rely on the device_link. Make sure the IOMMU is on to
+ * avoid unclocked accesses in the TLB inv path:
+ */
+ pm_runtime_get_sync(qcom_domain->iommu->dev);
+ spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
+ ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
+ spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
+ pm_runtime_put_sync(qcom_domain->iommu->dev);
+
+ return ret;
+}
+
+static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
+ struct io_pgtable, ops);
+ if (!qcom_domain->pgtbl_ops)
+ return;
+
+ pm_runtime_get_sync(qcom_domain->iommu->dev);
+ qcom_iommu_tlb_sync(pgtable->cookie);
+ pm_runtime_put_sync(qcom_domain->iommu->dev);
+}
+
+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ qcom_iommu_flush_iotlb_all(domain);
+}
+
+static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ phys_addr_t ret;
+ unsigned long flags;
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
+ ret = ops->iova_to_phys(ops, iova);
+ spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
+
+ return ret;
+}
+
+static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ /*
+ * Return true here as the SMMU can always send out coherent
+ * requests.
+ */
+ return true;
+ case IOMMU_CAP_NOEXEC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
+{
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct device_link *link;
+
+ if (!qcom_iommu)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Establish the link between iommu and master, so that the
+ * iommu gets runtime enabled/disabled as per the master's
+ * needs.
+ */
+ link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
+ dev_name(qcom_iommu->dev), dev_name(dev));
+ return ERR_PTR(-ENODEV);
+ }
+
+ return &qcom_iommu->iommu;
+}
+
+static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct qcom_iommu_dev *qcom_iommu;
+ struct platform_device *iommu_pdev;
+ unsigned asid = args->args[0];
+
+ if (args->args_count != 1) {
+ dev_err(dev, "incorrect number of iommu params found for %s "
+ "(found %d, expected 1)\n",
+ args->np->full_name, args->args_count);
+ return -EINVAL;
+ }
+
+ iommu_pdev = of_find_device_by_node(args->np);
+ if (WARN_ON(!iommu_pdev))
+ return -EINVAL;
+
+ qcom_iommu = platform_get_drvdata(iommu_pdev);
+
+ /* make sure the asid specified in dt is valid, so we don't have
+ * to sanity check this elsewhere:
+ */
+ if (WARN_ON(asid > qcom_iommu->max_asid) ||
+ WARN_ON(qcom_iommu->ctxs[asid] == NULL)) {
+ put_device(&iommu_pdev->dev);
+ return -EINVAL;
+ }
+
+ if (!dev_iommu_priv_get(dev)) {
+ dev_iommu_priv_set(dev, qcom_iommu);
+ } else {
+		/* make sure the device's iommus DT node isn't referring to
+ * multiple different iommu devices. Multiple context
+ * banks are ok, but multiple devices are not:
+ */
+ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
+ put_device(&iommu_pdev->dev);
+ return -EINVAL;
+ }
+ }
+
+ return iommu_fwspec_add_ids(dev, &asid, 1);
+}
+
+static const struct iommu_ops qcom_iommu_ops = {
+ .capable = qcom_iommu_capable,
+ .domain_alloc = qcom_iommu_domain_alloc,
+ .probe_device = qcom_iommu_probe_device,
+ .device_group = generic_device_group,
+ .of_xlate = qcom_iommu_of_xlate,
+ .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = qcom_iommu_attach_dev,
+ .map_pages = qcom_iommu_map,
+ .unmap_pages = qcom_iommu_unmap,
+ .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
+ .iotlb_sync = qcom_iommu_iotlb_sync,
+ .iova_to_phys = qcom_iommu_iova_to_phys,
+ .free = qcom_iommu_domain_free,
+ }
+};
+
+static int qcom_iommu_sec_ptbl_init(struct device *dev)
+{
+ size_t psize = 0;
+ unsigned int spare = 0;
+ void *cpu_addr;
+ dma_addr_t paddr;
+ unsigned long attrs;
+ static bool allocated = false;
+ int ret;
+
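+ /* the secure pgtable only needs to be set up once, no matter how
+ * many iommu instances probe (hence the static flag):
+ */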
+ if (allocated)
+ return 0;
+
+ ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
+ if (ret) {
+ dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
+ ret);
+ return ret;
+ }
+
+ dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);
+
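+ /* the table is used only by the secure world and is never touched
+ * through cpu_addr, so no kernel mapping is needed:
+ */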
+ attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+
+ cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
+ if (!cpu_addr) {
+ dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
+ psize);
+ return -ENOMEM;
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
+ if (ret) {
+ dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
+ goto free_mem;
+ }
+
+ allocated = true;
+ return 0;
+
+free_mem:
+ dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
+ return ret;
+}
+
+static int get_asid(const struct device_node *np)
+{
+ u32 reg, val;
+ int asid;
+
+ /* read the "reg" property directly to get the relative address
+ * of the context bank, and calculate the asid from that:
+ */
+ if (of_property_read_u32_index(np, "reg", 0, &reg))
+ return -ENODEV;
+
+ /*
+ * Context banks are 0x1000 apart but, in some cases, the ASID
+ * number doesn't match to this logic and needs to be passed
+ * from the DT configuration explicitly.
+ */
+ if (!of_property_read_u32(np, "qcom,ctx-asid", &val))
+ asid = val;
+ else
+ asid = reg / 0x1000;
+
+ return asid;
+}
+
+static int qcom_iommu_ctx_probe(struct platform_device *pdev)
+{
+ struct qcom_iommu_ctx *ctx;
+ struct device *dev = &pdev->dev;
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
+ int ret, irq;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ platform_set_drvdata(pdev, ctx);
+
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
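+ /* registers of secured context banks are owned by the secure world
+ * and cannot be programmed from here:
+ */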
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec"))
+ ctx->secured_ctx = true;
+
+ /* clear IRQs before registering fault handler, just in case the
+ * boot-loader left us a surprise:
+ */
+ if (!ctx->secured_ctx)
+ iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
+
+ ret = devm_request_irq(dev, irq,
+ qcom_iommu_fault,
+ IRQF_SHARED,
+ "qcom-iommu-fault",
+ ctx);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %u\n", irq);
+ return ret;
+ }
+
+ ret = get_asid(dev->of_node);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property\n");
+ return ret;
+ }
+
+ ctx->asid = ret;
+
+ dev_dbg(dev, "found asid %u\n", ctx->asid);
+
+ qcom_iommu->ctxs[ctx->asid] = ctx;
+
+ return 0;
+}
+
+static void qcom_iommu_ctx_remove(struct platform_device *pdev)
+{
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
+ struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
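+ /* clear the context's slot so qcom_iommu_of_xlate() can no longer
+ * hand it out:
+ */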
+ qcom_iommu->ctxs[ctx->asid] = NULL;
+}
+
+static const struct of_device_id ctx_of_match[] = {
+ { .compatible = "qcom,msm-iommu-v1-ns" },
+ { .compatible = "qcom,msm-iommu-v1-sec" },
+ { .compatible = "qcom,msm-iommu-v2-ns" },
+ { .compatible = "qcom,msm-iommu-v2-sec" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver qcom_iommu_ctx_driver = {
+ .driver = {
+ .name = "qcom-iommu-ctx",
+ .of_match_table = ctx_of_match,
+ },
+ .probe = qcom_iommu_ctx_probe,
+ .remove_new = qcom_iommu_ctx_remove,
+};
+
+static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(qcom_iommu->dev->of_node, child) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") ||
+ of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) {
+ of_node_put(child);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int qcom_iommu_device_probe(struct platform_device *pdev)
+{
+ struct device_node *child;
+ struct qcom_iommu_dev *qcom_iommu;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct clk *clk;
+ int ret, max_asid = 0;
+
+ /* find the max asid (which is 1:1 with the ctx bank idx), so we know
+ * how many child ctx devices we have:
+ */
+ for_each_child_of_node(dev->of_node, child)
+ max_asid = max(max_asid, get_asid(child));
+
+ qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1),
+ GFP_KERNEL);
+ if (!qcom_iommu)
+ return -ENOMEM;
+ qcom_iommu->max_asid = max_asid;
+ qcom_iommu->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res) {
+ qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qcom_iommu->local_base))
+ return PTR_ERR(qcom_iommu->local_base);
+ }
+
+ clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get iface clock\n");
+ return PTR_ERR(clk);
+ }
+ qcom_iommu->clks[CLK_IFACE].clk = clk;
+
+ clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ return PTR_ERR(clk);
+ }
+ qcom_iommu->clks[CLK_BUS].clk = clk;
+
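+ /* the tbu clock is optional; devm_clk_get_optional() returns NULL when
+ * it is absent, and the clk_bulk helpers treat a NULL clk as a no-op:
+ */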
+ clk = devm_clk_get_optional(dev, "tbu");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get tbu clock\n");
+ return PTR_ERR(clk);
+ }
+ qcom_iommu->clks[CLK_TBU].clk = clk;
+
+ if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
+ &qcom_iommu->sec_id)) {
+ dev_err(dev, "missing qcom,iommu-secure-id property\n");
+ return -ENODEV;
+ }
+
+ if (qcom_iommu_has_secure_context(qcom_iommu)) {
+ ret = qcom_iommu_sec_ptbl_init(dev);
+ if (ret) {
+ dev_err(dev, "cannot init secure pg table(%d)\n", ret);
+ return ret;
+ }
+ }
+
+ platform_set_drvdata(pdev, qcom_iommu);
+
+ pm_runtime_enable(dev);
+
+ /* register context bank devices, which are child nodes: */
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "Failed to populate iommu contexts\n");
+ goto err_pm_disable;
+ }
+
+ ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
+ dev_name(dev));
+ if (ret) {
+ dev_err(dev, "Failed to register iommu in sysfs\n");
+ goto err_pm_disable;
+ }
+
+ ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
+ if (ret) {
+ dev_err(dev, "Failed to register iommu\n");
+ goto err_pm_disable;
+ }
+
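+ /* route all context bank interrupts to the non-secure interrupt line: */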
+ if (qcom_iommu->local_base) {
+ pm_runtime_get_sync(dev);
+ writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
+ pm_runtime_put_sync(dev);
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void qcom_iommu_device_remove(struct platform_device *pdev)
+{
+ struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+
+ pm_runtime_force_suspend(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ iommu_device_sysfs_remove(&qcom_iommu->iommu);
+ iommu_device_unregister(&qcom_iommu->iommu);
+}
+
+static int __maybe_unused qcom_iommu_resume(struct device *dev)
+{
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
+}
+
+static int __maybe_unused qcom_iommu_suspend(struct device *dev)
+{
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);
+
+ return 0;
+}
+
+static const struct dev_pm_ops qcom_iommu_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id qcom_iommu_of_match[] = {
+ { .compatible = "qcom,msm-iommu-v1" },
+ { .compatible = "qcom,msm-iommu-v2" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver qcom_iommu_driver = {
+ .driver = {
+ .name = "qcom-iommu",
+ .of_match_table = qcom_iommu_of_match,
+ .pm = &qcom_iommu_pm_ops,
+ },
+ .probe = qcom_iommu_device_probe,
+ .remove_new = qcom_iommu_device_remove,
+};
+
+static int __init qcom_iommu_init(void)
+{
+ int ret;
+
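+ /* the context banks are child devices of the top-level iommu device;
+ * register their driver first, since qcom_iommu_device_probe()
+ * populates those children via devm_of_platform_populate():
+ */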
+ ret = platform_driver_register(&qcom_iommu_ctx_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&qcom_iommu_driver);
+ if (ret)
+ platform_driver_unregister(&qcom_iommu_ctx_driver);
+
+ return ret;
+}
+device_initcall(qcom_iommu_init);