author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-07 13:17:52 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-07 13:17:52 +0000
commit    3afb00d3f86d3d924f88b56fa8285d4e9db85852 (patch)
tree      95a985d3019522cea546b7d8df621369bc44fc6c /drivers/iommu/arm
parent    Adding debian version 6.9.12-1. (diff)

Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/iommu/arm')
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/Makefile           |   2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c  | 165
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c | 468
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c      | 568
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h      |  60
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c | 481
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c       |  47
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h       |   4
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c            |  20
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.h            |   3
10 files changed, 1475 insertions(+), 343 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
index 54feb1eccc..014a997753 100644
--- a/drivers/iommu/arm/arm-smmu-v3/Makefile
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
arm_smmu_v3-objs-y += arm-smmu-v3.o
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
+
+obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
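
The test object above is built only when CONFIG_ARM_SMMU_V3_KUNIT_TEST is enabled. The new test file (shown in full below) follows the standard KUnit registration pattern; a minimal sketch of that pattern, with illustrative names that are not part of this patch:

// SPDX-License-Identifier: GPL-2.0
/* Minimal KUnit suite skeleton (illustrative only, not from the patch) */
#include <kunit/test.h>

static void example_test_case(struct kunit *test)
{
        /* KUNIT_EXPECT_* records a failure but lets the case keep running */
        KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_test_cases[] = {
        KUNIT_CASE(example_test_case),
        {},
};

static struct kunit_suite example_test_suite = {
        .name = "example-suite",
        .test_cases = example_test_cases,
};
kunit_test_suites(&example_test_suite);
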
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 41b44baef1..e490ffb380 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -8,6 +8,7 @@
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
+#include <kunit/visibility.h>
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
@@ -34,21 +35,25 @@ struct arm_smmu_bond {
static DEFINE_MUTEX(sva_lock);
-/*
- * Write the CD to the CD tables for all masters that this domain is attached
- * to. Note that this is only used to update existing CD entries in the target
- * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
- */
-static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
- int ssid,
- struct arm_smmu_ctx_desc *cd)
+static void
+arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_master *master;
+ struct arm_smmu_cd target_cd;
unsigned long flags;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- arm_smmu_write_ctx_desc(master, ssid, cd);
+ struct arm_smmu_cd *cdptr;
+
+ /* S1 domains only support RID attachment right now */
+ cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+ if (WARN_ON(!cdptr))
+ continue;
+
+ arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+ arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+ &target_cd);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}
@@ -96,7 +101,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
- arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
+ arm_smmu_update_s1_domain_cd_entry(smmu_domain);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
@@ -105,11 +110,87 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
return NULL;
}
+static u64 page_size_to_cd(void)
+{
+ static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
+ PAGE_SIZE == SZ_64K);
+ if (PAGE_SIZE == SZ_64K)
+ return ARM_LPAE_TCR_TG0_64K;
+ if (PAGE_SIZE == SZ_16K)
+ return ARM_LPAE_TCR_TG0_16K;
+ return ARM_LPAE_TCR_TG0_4K;
+}
+
+VISIBLE_IF_KUNIT
+void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master, struct mm_struct *mm,
+ u16 asid)
+{
+ u64 par;
+
+ memset(target, 0, sizeof(*target));
+
+ par = cpuid_feature_extract_unsigned_field(
+ read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
+ ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+ target->data[0] = cpu_to_le64(
+ CTXDESC_CD_0_TCR_EPD1 |
+#ifdef __BIG_ENDIAN
+ CTXDESC_CD_0_ENDI |
+#endif
+ CTXDESC_CD_0_V |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
+ CTXDESC_CD_0_AA64 |
+ (master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+ CTXDESC_CD_0_R |
+ CTXDESC_CD_0_A |
+ CTXDESC_CD_0_ASET |
+ FIELD_PREP(CTXDESC_CD_0_ASID, asid));
+
+ /*
+ * If no MM is passed then this creates an SVA entry that faults
+ * everything. arm_smmu_write_cd_entry() can hitlessly go between these
+ * two entry types since TTB0 is ignored by HW when EPD0 is set.
+ */
+ if (mm) {
+ target->data[0] |= cpu_to_le64(
+ FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
+ 64ULL - vabits_actual) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
+ ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
+ ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));
+
+ target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
+ CTXDESC_CD_1_TTB0_MASK);
+ } else {
+ target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);
+
+ /*
+ * Disable stall and immediately generate an abort if stall
+ * disable is permitted. This speeds up cleanup for an unclean
+ * exit if the device is still doing a lot of DMA.
+ */
+ if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+ target->data[0] &=
+ cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
+ }
+
+ /*
+ * MAIR value is pretty much constant and global, so we can just get it
+ * from the current CPU register
+ */
+ target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
+
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
u16 asid;
int err = 0;
- u64 tcr, par, reg;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_ctx_desc *ret = NULL;
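
The SVA CD construction above folds in two small computations that are easy to check in isolation: the TG0 granule encoding picked from PAGE_SIZE and T0SZ derived from the VA width. A userspace sketch with stand-in constants (the TG0 values follow the ARM_LPAE_TCR_TG0_* encodings; vabits stands in for the kernel's vabits_actual):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K   0x1000UL
#define SZ_16K  0x4000UL
#define SZ_64K  0x10000UL

/* TG0 encodings from the Arm LPAE TCR format */
enum { TG0_4K = 0, TG0_64K = 1, TG0_16K = 2 };

static uint64_t page_size_to_tg0(unsigned long page_size)
{
        if (page_size == SZ_64K)
                return TG0_64K;
        if (page_size == SZ_16K)
                return TG0_16K;
        return TG0_4K;
}

int main(void)
{
        unsigned long vabits = 48;      /* e.g. a 48-bit VA configuration */

        /* T0SZ encodes the region size as 64 - VA bits */
        printf("TG0=%llu T0SZ=%lu\n",
               (unsigned long long)page_size_to_tg0(SZ_4K), 64 - vabits);
        return 0;
}
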
@@ -143,39 +224,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
if (err)
goto out_free_asid;
- tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
- FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
- FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
- FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
- CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-
- switch (PAGE_SIZE) {
- case SZ_4K:
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
- break;
- case SZ_16K:
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
- break;
- case SZ_64K:
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
- break;
- default:
- WARN_ON(1);
- err = -EINVAL;
- goto out_free_asid;
- }
-
- reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
- tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
-
- cd->ttbr = virt_to_phys(mm->pgd);
- cd->tcr = tcr;
- /*
- * MAIR value is pretty much constant and global, so we can just get it
- * from the current CPU register
- */
- cd->mair = read_sysreg(mair_el1);
cd->asid = asid;
cd->mm = mm;
@@ -253,6 +301,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+ struct arm_smmu_master *master;
+ unsigned long flags;
mutex_lock(&sva_lock);
if (smmu_mn->cleared) {
@@ -264,8 +314,19 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation.
*/
- arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
- &quiet_cd);
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ struct arm_smmu_cd target;
+ struct arm_smmu_cd *cdptr;
+
+ cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+ if (WARN_ON(!cdptr))
+ continue;
+ arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
+ arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
+ &target);
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
@@ -360,6 +421,8 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
struct mm_struct *mm)
{
int ret;
+ struct arm_smmu_cd target;
+ struct arm_smmu_cd *cdptr;
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -386,9 +449,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
goto err_free_bond;
}
- ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
- if (ret)
+ cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+ if (!cdptr) {
+ ret = -ENOMEM;
goto err_put_notifier;
+ }
+ arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
+ arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
list_add(&bond->list, &master->bonds);
return 0;
@@ -546,7 +613,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
mutex_lock(&sva_lock);
- arm_smmu_write_ctx_desc(master, id, NULL);
+ arm_smmu_clear_cd(master, id);
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
new file mode 100644
index 0000000000..315e487fd9
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 Google LLC.
+ */
+#include <kunit/test.h>
+#include <linux/io-pgtable.h>
+
+#include "arm-smmu-v3.h"
+
+struct arm_smmu_test_writer {
+ struct arm_smmu_entry_writer writer;
+ struct kunit *test;
+ const __le64 *init_entry;
+ const __le64 *target_entry;
+ __le64 *entry;
+
+ bool invalid_entry_written;
+ unsigned int num_syncs;
+};
+
+#define NUM_ENTRY_QWORDS 8
+#define NUM_EXPECTED_SYNCS(x) x
+
+static struct arm_smmu_ste bypass_ste;
+static struct arm_smmu_ste abort_ste;
+static struct arm_smmu_device smmu = {
+ .features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR
+};
+static struct mm_struct sva_mm = {
+ .pgd = (void *)0xdaedbeefdeadbeefULL,
+};
+
+static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
+ const __le64 *used_bits,
+ const __le64 *target,
+ unsigned int length)
+{
+ bool differs = false;
+ unsigned int i;
+
+ for (i = 0; i < length; i++) {
+ if ((entry[i] & used_bits[i]) != target[i])
+ differs = true;
+ }
+ return differs;
+}
+
+static void
+arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
+{
+ struct arm_smmu_test_writer *test_writer =
+ container_of(writer, struct arm_smmu_test_writer, writer);
+ __le64 *entry_used_bits;
+
+ entry_used_bits = kunit_kzalloc(
+ test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);
+
+ pr_debug("STE value is now set to: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8,
+ test_writer->entry,
+ NUM_ENTRY_QWORDS * sizeof(*test_writer->entry),
+ false);
+
+ test_writer->num_syncs += 1;
+ if (!test_writer->entry[0]) {
+ test_writer->invalid_entry_written = true;
+ } else {
+ /*
+ * At any stage in a hitless transition, the entry must be
+ * equivalent to either the initial entry or the target entry
+ * when only considering the bits used by the current
+ * configuration.
+ */
+ writer->ops->get_used(test_writer->entry, entry_used_bits);
+ KUNIT_EXPECT_FALSE(
+ test_writer->test,
+ arm_smmu_entry_differs_in_used_bits(
+ test_writer->entry, entry_used_bits,
+ test_writer->init_entry, NUM_ENTRY_QWORDS) &&
+ arm_smmu_entry_differs_in_used_bits(
+ test_writer->entry, entry_used_bits,
+ test_writer->target_entry,
+ NUM_ENTRY_QWORDS));
+ }
+}
+
+static void
+arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
+ const __le64 *ste)
+{
+ __le64 used_bits[NUM_ENTRY_QWORDS] = {};
+
+ arm_smmu_get_ste_used(ste, used_bits);
+ pr_debug("STE used bits: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, used_bits,
+ sizeof(used_bits), false);
+}
+
+static const struct arm_smmu_entry_writer_ops test_ste_ops = {
+ .sync = arm_smmu_test_writer_record_syncs,
+ .get_used = arm_smmu_get_ste_used,
+};
+
+static const struct arm_smmu_entry_writer_ops test_cd_ops = {
+ .sync = arm_smmu_test_writer_record_syncs,
+ .get_used = arm_smmu_get_cd_used,
+};
+
+static void arm_smmu_v3_test_ste_expect_transition(
+ struct kunit *test, const struct arm_smmu_ste *cur,
+ const struct arm_smmu_ste *target, unsigned int num_syncs_expected,
+ bool hitless)
+{
+ struct arm_smmu_ste cur_copy = *cur;
+ struct arm_smmu_test_writer test_writer = {
+ .writer = {
+ .ops = &test_ste_ops,
+ },
+ .test = test,
+ .init_entry = cur->data,
+ .target_entry = target->data,
+ .entry = cur_copy.data,
+ .num_syncs = 0,
+ .invalid_entry_written = false,
+
+ };
+
+ pr_debug("STE initial value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
+ pr_debug("STE target value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
+ target->data);
+
+ arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);
+
+ KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
+ KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
+ KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
+}
+
+static void arm_smmu_v3_test_ste_expect_hitless_transition(
+ struct kunit *test, const struct arm_smmu_ste *cur,
+ const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
+{
+ arm_smmu_v3_test_ste_expect_transition(test, cur, target,
+ num_syncs_expected, true);
+}
+
+static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;
+
+static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
+ const dma_addr_t dma_addr)
+{
+ struct arm_smmu_master master = {
+ .cd_table.cdtab_dma = dma_addr,
+ .cd_table.s1cdmax = 0xFF,
+ .cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
+ .smmu = &smmu,
+ };
+
+ arm_smmu_make_cdtable_ste(ste, &master);
+}
+
+static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
+{
+ /*
+ * Bypass STEs have used bits in the first two qwords, while abort STEs
+ * only have used bits in the first qword. Transitioning from bypass to
+ * abort requires two syncs: the first to set the first qword and make
+ * the STE into an abort, the second to clean up the second qword.
+ */
+ arm_smmu_v3_test_ste_expect_hitless_transition(
+ test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test)
+{
+ /*
+ * Transitioning from abort to bypass also requires two syncs: the first
+ * to set the second qword data required by the bypass STE, and the
+ * second to set the first qword and switch to bypass.
+ */
+ arm_smmu_v3_test_ste_expect_hitless_transition(
+ test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
+ NUM_EXPECTED_SYNCS(3));
+}
+
+static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
+ NUM_EXPECTED_SYNCS(3));
+}
+
+static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
+ bool ats_enabled)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ .ats_enabled = ats_enabled,
+ };
+ struct io_pgtable io_pgtable = {};
+ struct arm_smmu_domain smmu_domain = {
+ .pgtbl_ops = &io_pgtable.ops,
+ };
+
+ io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
+ io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;
+
+ arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain);
+}
+
+static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_test_cd_expect_transition(
+ struct kunit *test, const struct arm_smmu_cd *cur,
+ const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
+ bool hitless)
+{
+ struct arm_smmu_cd cur_copy = *cur;
+ struct arm_smmu_test_writer test_writer = {
+ .writer = {
+ .ops = &test_cd_ops,
+ },
+ .test = test,
+ .init_entry = cur->data,
+ .target_entry = target->data,
+ .entry = cur_copy.data,
+ .num_syncs = 0,
+ .invalid_entry_written = false,
+
+ };
+
+ pr_debug("CD initial value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
+ pr_debug("CD target value: ");
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data,
+ sizeof(cur_copy), false);
+ arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
+ target->data);
+
+ arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);
+
+ KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
+ KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
+ KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
+}
+
+static void arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ struct kunit *test, const struct arm_smmu_cd *cur,
+ const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
+{
+ arm_smmu_v3_test_cd_expect_transition(test, cur, target,
+ num_syncs_expected, false);
+}
+
+static void arm_smmu_v3_test_cd_expect_hitless_transition(
+ struct kunit *test, const struct arm_smmu_cd *cur,
+ const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
+{
+ arm_smmu_v3_test_cd_expect_transition(test, cur, target,
+ num_syncs_expected, true);
+}
+
+static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ };
+ struct io_pgtable io_pgtable = {};
+ struct arm_smmu_domain smmu_domain = {
+ .pgtbl_ops = &io_pgtable.ops,
+ .cd = {
+ .asid = asid,
+ },
+ };
+
+ io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2;
+ io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4;
+ io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL;
+
+ arm_smmu_make_s1_cd(cd, &master, &smmu_domain);
+}
+
+static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test)
+{
+ struct arm_smmu_cd cd = {};
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_s1_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test)
+{
+ struct arm_smmu_cd cd = {};
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_s1_cd(&cd, 778);
+ arm_smmu_test_make_s1_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
+ NUM_EXPECTED_SYNCS(1));
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
+ NUM_EXPECTED_SYNCS(1));
+}
+
+static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ };
+
+ arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid);
+}
+
+static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
+ unsigned int asid)
+{
+ struct arm_smmu_master master = {
+ .smmu = &smmu,
+ };
+
+ arm_smmu_make_sva_cd(cd, &master, NULL, asid);
+}
+
+static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
+{
+ struct arm_smmu_cd cd = {};
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_sva_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
+ arm_smmu_v3_test_cd_expect_non_hitless_transition(
+ test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
+}
+
+static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test)
+{
+ struct arm_smmu_cd cd;
+ struct arm_smmu_cd cd_2;
+
+ arm_smmu_test_make_sva_cd(&cd, 1997);
+ arm_smmu_test_make_sva_release_cd(&cd_2, 1997);
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
+ NUM_EXPECTED_SYNCS(2));
+ arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
+ NUM_EXPECTED_SYNCS(2));
+}
+
+static struct kunit_case arm_smmu_v3_test_cases[] = {
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
+ KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
+ {},
+};
+
+static int arm_smmu_v3_test_suite_init(struct kunit_suite *test)
+{
+ arm_smmu_make_bypass_ste(&smmu, &bypass_ste);
+ arm_smmu_make_abort_ste(&abort_ste);
+ return 0;
+}
+
+static struct kunit_suite arm_smmu_v3_test_module = {
+ .name = "arm-smmu-v3-kunit-test",
+ .suite_init = arm_smmu_v3_test_suite_init,
+ .test_cases = arm_smmu_v3_test_cases,
+};
+kunit_test_suites(&arm_smmu_v3_test_module);
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 3afec8714c..f456bcf189 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -26,15 +26,11 @@
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
+#include <kunit/visibility.h>
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
-static bool disable_bypass = true;
-module_param(disable_bypass, bool, 0444);
-MODULE_PARM_DESC(disable_bypass,
- "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
-
static bool disable_msipolling;
module_param(disable_msipolling, bool, 0444);
MODULE_PARM_DESC(disable_msipolling,
@@ -47,8 +43,9 @@ enum arm_smmu_msi_index {
ARM_SMMU_MAX_MSIS,
};
-static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
- ioasid_t sid);
+#define NUM_ENTRY_QWORDS 8
+static_assert(sizeof(struct arm_smmu_ste) == NUM_ENTRY_QWORDS * sizeof(u64));
+static_assert(sizeof(struct arm_smmu_cd) == NUM_ENTRY_QWORDS * sizeof(u64));
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
[EVTQ_MSI_INDEX] = {
@@ -76,12 +73,6 @@ struct arm_smmu_option_prop {
DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
DEFINE_MUTEX(arm_smmu_asid_lock);
-/*
- * Special value used by SVA when a process dies, to quiesce a CD without
- * disabling it.
- */
-struct arm_smmu_ctx_desc quiet_cd = { 0 };
-
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -90,6 +81,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu);
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
static void parse_driver_options(struct arm_smmu_device *smmu)
{
@@ -977,44 +969,45 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
* would be nice if this were complete according to the spec, but minimally it
* has to capture the bits this driver uses.
*/
-static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
- struct arm_smmu_ste *used_bits)
+VISIBLE_IF_KUNIT
+void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
{
- unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
+ unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent[0]));
- used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
- if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
+ used_bits[0] = cpu_to_le64(STRTAB_STE_0_V);
+ if (!(ent[0] & cpu_to_le64(STRTAB_STE_0_V)))
return;
- used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
+ used_bits[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
/* S1 translates */
if (cfg & BIT(0)) {
- used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
- STRTAB_STE_0_S1CTXPTR_MASK |
- STRTAB_STE_0_S1CDMAX);
- used_bits->data[1] |=
+ used_bits[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
+ STRTAB_STE_0_S1CTXPTR_MASK |
+ STRTAB_STE_0_S1CDMAX);
+ used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
STRTAB_STE_1_EATS);
- used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
+ used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
}
/* S2 translates */
if (cfg & BIT(1)) {
- used_bits->data[1] |=
+ used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
- used_bits->data[2] |=
+ used_bits[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
- used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
+ used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
}
if (cfg == STRTAB_STE_0_CFG_BYPASS)
- used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
+ used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
/*
* Figure out if we can do a hitless update of entry to become target. Returns a
@@ -1022,57 +1015,55 @@ static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
* unused_update is an intermediate value of entry that has unused bits set to
* their new values.
*/
-static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
- const struct arm_smmu_ste *target,
- struct arm_smmu_ste *unused_update)
+static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
+ const __le64 *entry, const __le64 *target,
+ __le64 *unused_update)
{
- struct arm_smmu_ste target_used = {};
- struct arm_smmu_ste cur_used = {};
+ __le64 target_used[NUM_ENTRY_QWORDS] = {};
+ __le64 cur_used[NUM_ENTRY_QWORDS] = {};
u8 used_qword_diff = 0;
unsigned int i;
- arm_smmu_get_ste_used(entry, &cur_used);
- arm_smmu_get_ste_used(target, &target_used);
+ writer->ops->get_used(entry, cur_used);
+ writer->ops->get_used(target, target_used);
- for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
+ for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
/*
* Check that masks are up to date; the make functions are not
* allowed to set a bit to 1 if the used function doesn't say it
* is used.
*/
- WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
+ WARN_ON_ONCE(target[i] & ~target_used[i]);
/* Bits can change because they are not currently being used */
- unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
- (target->data[i] & ~cur_used.data[i]);
+ unused_update[i] = (entry[i] & cur_used[i]) |
+ (target[i] & ~cur_used[i]);
/*
* Each bit indicates that a used bit in a qword needs to be
* changed after unused_update is applied.
*/
- if ((unused_update->data[i] & target_used.data[i]) !=
- target->data[i])
+ if ((unused_update[i] & target_used[i]) != target[i])
used_qword_diff |= 1 << i;
}
return used_qword_diff;
}
-static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
- struct arm_smmu_ste *entry,
- const struct arm_smmu_ste *target, unsigned int start,
+static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
+ const __le64 *target, unsigned int start,
unsigned int len)
{
bool changed = false;
unsigned int i;
for (i = start; len != 0; len--, i++) {
- if (entry->data[i] != target->data[i]) {
- WRITE_ONCE(entry->data[i], target->data[i]);
+ if (entry[i] != target[i]) {
+ WRITE_ONCE(entry[i], target[i]);
changed = true;
}
}
if (changed)
- arm_smmu_sync_ste_for_sid(smmu, sid);
+ writer->ops->sync(writer);
return changed;
}
@@ -1102,24 +1093,22 @@ static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
* V=0 process. This relies on the IGNORED behavior described in the
* specification.
*/
-static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
- struct arm_smmu_ste *entry,
- const struct arm_smmu_ste *target)
+VISIBLE_IF_KUNIT
+void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
+ const __le64 *target)
{
- unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
- struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_ste unused_update;
+ __le64 unused_update[NUM_ENTRY_QWORDS];
u8 used_qword_diff;
used_qword_diff =
- arm_smmu_entry_qword_diff(entry, target, &unused_update);
+ arm_smmu_entry_qword_diff(writer, entry, target, unused_update);
if (hweight8(used_qword_diff) == 1) {
/*
* Only one qword needs its used bits to be changed. This is a
- * hitless update, update all bits the current STE is ignoring
- * to their new values, then update a single "critical qword" to
- * change the STE and finally 0 out any bits that are now unused
- * in the target configuration.
+ * hitless update: update all bits the current STE/CD is
+ * ignoring to their new values, then update a single "critical
+ * qword" to change the STE/CD and finally 0 out any bits that
+ * are now unused in the target configuration.
*/
unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
@@ -1128,22 +1117,21 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* writing it in the next step anyways. This can save a sync
* when the only change is in that qword.
*/
- unused_update.data[critical_qword_index] =
- entry->data[critical_qword_index];
- entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
- entry_set(smmu, sid, entry, target, critical_qword_index, 1);
- entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
+ unused_update[critical_qword_index] =
+ entry[critical_qword_index];
+ entry_set(writer, entry, unused_update, 0, NUM_ENTRY_QWORDS);
+ entry_set(writer, entry, target, critical_qword_index, 1);
+ entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
} else if (used_qword_diff) {
/*
* At least two qwords need their in-use bits to be changed. This
* requires a breaking update: zero the V bit, write all qwords
* but qword 0, then set qword 0.
*/
- unused_update.data[0] = entry->data[0] &
- cpu_to_le64(~STRTAB_STE_0_V);
- entry_set(smmu, sid, entry, &unused_update, 0, 1);
- entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
- entry_set(smmu, sid, entry, target, 0, 1);
+ unused_update[0] = 0;
+ entry_set(writer, entry, unused_update, 0, 1);
+ entry_set(writer, entry, target, 1, NUM_ENTRY_QWORDS - 1);
+ entry_set(writer, entry, target, 0, 1);
} else {
/*
* No in-use bit changed. Sanity check that all unused bits are 0
@@ -1151,20 +1139,10 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* arm_smmu_entry_qword_diff().
*/
WARN_ON_ONCE(
- entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
- }
-
- /* It's likely that we'll want to use the new STE soon */
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
- struct arm_smmu_cmdq_ent
- prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
- .prefetch = {
- .sid = sid,
- } };
-
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
}
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
@@ -1210,117 +1188,166 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
CTXDESC_L1_DESC_V;
- /* See comment in arm_smmu_write_ctx_desc() */
+ /* The HW has 64 bit atomicity with stores to the L2 CD table */
WRITE_ONCE(*dst, cpu_to_le64(val));
}
-static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid)
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid)
{
- __le64 *l1ptr;
- unsigned int idx;
struct arm_smmu_l1_ctx_desc *l1_desc;
- struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ if (!cd_table->cdtab)
+ return NULL;
+
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
- return cd_table->cdtab + ssid * CTXDESC_CD_DWORDS;
+ return (struct arm_smmu_cd *)(cd_table->cdtab +
+ ssid * CTXDESC_CD_DWORDS);
- idx = ssid >> CTXDESC_SPLIT;
- l1_desc = &cd_table->l1_desc[idx];
- if (!l1_desc->l2ptr) {
- if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES];
+ if (!l1_desc->l2ptr)
+ return NULL;
+ return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
+}
+
+struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid)
+{
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
+
+ might_sleep();
+ iommu_group_mutex_assert(master->dev);
+
+ if (!cd_table->cdtab) {
+ if (arm_smmu_alloc_cd_tables(master))
return NULL;
+ }
+
+ if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
+ unsigned int idx = ssid / CTXDESC_L2_ENTRIES;
+ struct arm_smmu_l1_ctx_desc *l1_desc;
+
+ l1_desc = &cd_table->l1_desc[idx];
+ if (!l1_desc->l2ptr) {
+ __le64 *l1ptr;
- l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
- arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
- /* An invalid L1CD can be cached */
- arm_smmu_sync_cd(master, ssid, false);
+ if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ return NULL;
+
+ l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+ arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+ /* An invalid L1CD can be cached */
+ arm_smmu_sync_cd(master, ssid, false);
+ }
}
- idx = ssid & (CTXDESC_L2_ENTRIES - 1);
- return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+ return arm_smmu_get_cd_ptr(master, ssid);
}
-int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
- struct arm_smmu_ctx_desc *cd)
+struct arm_smmu_cd_writer {
+ struct arm_smmu_entry_writer writer;
+ unsigned int ssid;
+};
+
+VISIBLE_IF_KUNIT
+void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits)
{
+ used_bits[0] = cpu_to_le64(CTXDESC_CD_0_V);
+ if (!(ent[0] & cpu_to_le64(CTXDESC_CD_0_V)))
+ return;
+ memset(used_bits, 0xFF, sizeof(struct arm_smmu_cd));
+
/*
- * This function handles the following cases:
- *
- * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
- * (2) Install a secondary CD, for SID+SSID traffic.
- * (3) Update ASID of a CD. Atomically write the first 64 bits of the
- * CD, then invalidate the old entry and mappings.
- * (4) Quiesce the context without clearing the valid bit. Disable
- * translation, and ignore any translation fault.
- * (5) Remove a secondary CD.
+ * If EPD0 is set by the make function it means
+ * T0SZ/TG0/IR0/OR0/SH0/TTB0 are IGNORED
*/
- u64 val;
- bool cd_live;
- __le64 *cdptr;
- struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- struct arm_smmu_device *smmu = master->smmu;
+ if (ent[0] & cpu_to_le64(CTXDESC_CD_0_TCR_EPD0)) {
+ used_bits[0] &= ~cpu_to_le64(
+ CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 |
+ CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 |
+ CTXDESC_CD_0_TCR_SH0);
+ used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used);
- if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
- return -E2BIG;
+static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer)
+{
+ struct arm_smmu_cd_writer *cd_writer =
+ container_of(writer, struct arm_smmu_cd_writer, writer);
- cdptr = arm_smmu_get_cd_ptr(master, ssid);
- if (!cdptr)
- return -ENOMEM;
+ arm_smmu_sync_cd(writer->master, cd_writer->ssid, true);
+}
- val = le64_to_cpu(cdptr[0]);
- cd_live = !!(val & CTXDESC_CD_0_V);
-
- if (!cd) { /* (5) */
- val = 0;
- } else if (cd == &quiet_cd) { /* (4) */
- if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
- val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
- val |= CTXDESC_CD_0_TCR_EPD0;
- } else if (cd_live) { /* (3) */
- val &= ~CTXDESC_CD_0_ASID;
- val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
- /*
- * Until CD+TLB invalidation, both ASIDs may be used for tagging
- * this substream's traffic
- */
- } else { /* (1) and (2) */
- cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
- cdptr[2] = 0;
- cdptr[3] = cpu_to_le64(cd->mair);
+static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = {
+ .sync = arm_smmu_cd_writer_sync_entry,
+ .get_used = arm_smmu_get_cd_used,
+};
- /*
- * STE may be live, and the SMMU might read dwords of this CD in any
- * order. Ensure that it observes valid values before reading
- * V=1.
- */
- arm_smmu_sync_cd(master, ssid, true);
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+ struct arm_smmu_cd *cdptr,
+ const struct arm_smmu_cd *target)
+{
+ struct arm_smmu_cd_writer cd_writer = {
+ .writer = {
+ .ops = &arm_smmu_cd_writer_ops,
+ .master = master,
+ },
+ .ssid = ssid,
+ };
+
+ arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data);
+}
- val = cd->tcr |
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
+ const struct io_pgtable_cfg *pgtbl_cfg =
+ &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
+ &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+
+ memset(target, 0, sizeof(*target));
+
+ target->data[0] = cpu_to_le64(
+ FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
#ifdef __BIG_ENDIAN
- CTXDESC_CD_0_ENDI |
+ CTXDESC_CD_0_ENDI |
#endif
- CTXDESC_CD_0_R | CTXDESC_CD_0_A |
- (cd->mm ? 0 : CTXDESC_CD_0_ASET) |
- CTXDESC_CD_0_AA64 |
- FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
- CTXDESC_CD_0_V;
-
- if (cd_table->stall_enabled)
- val |= CTXDESC_CD_0_S;
- }
-
- /*
- * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
- * "Configuration structures and configuration invalidation completion"
- *
- * The size of single-copy atomic reads made by the SMMU is
- * IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
- * field within an aligned 64-bit span of a structure can be altered
- * without first making the structure invalid.
- */
- WRITE_ONCE(cdptr[0], cpu_to_le64(val));
- arm_smmu_sync_cd(master, ssid, true);
- return 0;
+ CTXDESC_CD_0_TCR_EPD1 |
+ CTXDESC_CD_0_V |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+ CTXDESC_CD_0_AA64 |
+ (master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+ CTXDESC_CD_0_R |
+ CTXDESC_CD_0_A |
+ CTXDESC_CD_0_ASET |
+ FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
+ );
+ target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ CTXDESC_CD_1_TTB0_MASK);
+ target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd);
+
+void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
+{
+ struct arm_smmu_cd target = {};
+ struct arm_smmu_cd *cdptr;
+
+ if (!master->cd_table.cdtab)
+ return;
+ cdptr = arm_smmu_get_cd_ptr(master, ssid);
+ if (WARN_ON(!cdptr))
+ return;
+ arm_smmu_write_cd_entry(master, ssid, cdptr, &target);
}
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
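
All of the CD and STE builders above pack fields with GENMASK/FIELD_PREP. A simplified userspace sketch of that packing (the real macros live in linux/bits.h and linux/bitfield.h; CD_0_ASID here mirrors CTXDESC_CD_0_ASID):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
/* Shift a value into position within its field mask */
#define FIELD_PREP(mask, val) \
        (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define CD_0_ASID GENMASK_ULL(63, 48)

int main(void)
{
        /* ASID 0x1997 lands in bits [63:48] of CD qword 0 */
        printf("%#llx\n", (unsigned long long)FIELD_PREP(CD_0_ASID, 0x1997));
        return 0;
}
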
@@ -1331,7 +1358,6 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- cd_table->stall_enabled = master->stall_enabled;
cd_table->s1cdmax = master->ssid_bits;
max_contexts = 1 << cd_table->s1cdmax;
@@ -1429,33 +1455,75 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
- /* See comment in arm_smmu_write_ctx_desc() */
+ /* The HW has 64 bit atomicity with stores to the L2 STE table */
WRITE_ONCE(*dst, cpu_to_le64(val));
}
-static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
+struct arm_smmu_ste_writer {
+ struct arm_smmu_entry_writer writer;
+ u32 sid;
+};
+
+static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
{
+ struct arm_smmu_ste_writer *ste_writer =
+ container_of(writer, struct arm_smmu_ste_writer, writer);
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_STE,
.cfgi = {
- .sid = sid,
+ .sid = ste_writer->sid,
.leaf = true,
},
};
- arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+ arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd);
}
-static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
+static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
+ .sync = arm_smmu_ste_writer_sync_entry,
+ .get_used = arm_smmu_get_ste_used,
+};
+
+static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
+ struct arm_smmu_ste *ste,
+ const struct arm_smmu_ste *target)
+{
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ste_writer ste_writer = {
+ .writer = {
+ .ops = &arm_smmu_ste_writer_ops,
+ .master = master,
+ },
+ .sid = sid,
+ };
+
+ arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data);
+
+ /* It's likely that we'll want to use the new STE soon */
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
+ struct arm_smmu_cmdq_ent
+ prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
+ .prefetch = {
+ .sid = sid,
+ } };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ }
+}
+
+VISIBLE_IF_KUNIT
+void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste);
-static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
- struct arm_smmu_ste *target)
+VISIBLE_IF_KUNIT
+void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target)
{
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
@@ -1466,9 +1534,11 @@ static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING));
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste);
-static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
- struct arm_smmu_master *master)
+VISIBLE_IF_KUNIT
+void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master)
{
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
struct arm_smmu_device *smmu = master->smmu;
@@ -1516,10 +1586,12 @@ static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste);
-static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
- struct arm_smmu_master *master,
- struct arm_smmu_domain *smmu_domain)
+VISIBLE_IF_KUNIT
+void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
const struct io_pgtable_cfg *pgtbl_cfg =
@@ -1562,22 +1634,19 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
STRTAB_STE_3_S2TTB_MASK);
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste);
/*
* This can safely manipulate the STE memory directly without a sync sequence
* because the STE table has not been installed in the SMMU yet.
*/
-static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu,
- struct arm_smmu_ste *strtab,
+static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
unsigned int nent)
{
unsigned int i;
for (i = 0; i < nent; ++i) {
- if (disable_bypass)
- arm_smmu_make_abort_ste(strtab);
- else
- arm_smmu_make_bypass_ste(smmu, strtab);
+ arm_smmu_make_abort_ste(strtab);
strtab++;
}
}
@@ -1605,7 +1674,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT);
+ arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2230,13 +2299,11 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
- struct arm_smmu_domain *smmu_domain,
- struct io_pgtable_cfg *pgtbl_cfg)
+ struct arm_smmu_domain *smmu_domain)
{
int ret;
- u32 asid;
+ u32 asid = 0;
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
- typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
refcount_set(&cd->refs, 1);
@@ -2244,31 +2311,13 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
mutex_lock(&arm_smmu_asid_lock);
ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
- if (ret)
- goto out_unlock;
-
cd->asid = (u16)asid;
- cd->ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
- cd->tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
- FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
- FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
- FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
- FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
- FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
- CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
- cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
-
- mutex_unlock(&arm_smmu_asid_lock);
- return 0;
-
-out_unlock:
mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
- struct arm_smmu_domain *smmu_domain,
- struct io_pgtable_cfg *pgtbl_cfg)
+ struct arm_smmu_domain *smmu_domain)
{
int vmid;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
@@ -2292,8 +2341,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
- struct arm_smmu_domain *smmu_domain,
- struct io_pgtable_cfg *pgtbl_cfg);
+ struct arm_smmu_domain *smmu_domain);
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2336,7 +2384,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
smmu_domain->domain.geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu, smmu_domain);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
@@ -2419,7 +2467,10 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master,
pdev = to_pci_dev(master->dev);
atomic_inc(&smmu_domain->nr_ats_masters);
- arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
+ /*
+ * ATC invalidation of PASID 0 causes the entire ATC to be flushed.
+ */
+ arm_smmu_atc_inv_master(master);
if (pci_enable_ats(pdev, stu))
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
@@ -2515,6 +2566,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_master *master;
+ struct arm_smmu_cd *cdptr;
if (!fwspec)
return -ENOENT;
@@ -2543,6 +2595,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (ret)
return ret;
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
+ if (!cdptr)
+ return -ENOMEM;
+ }
+
/*
* Prevent arm_smmu_share_asid() from trying to change the ASID
* of either the old or new domain while we are working on it.
@@ -2560,49 +2618,26 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
switch (smmu_domain->stage) {
- case ARM_SMMU_DOMAIN_S1:
- if (!master->cd_table.cdtab) {
- ret = arm_smmu_alloc_cd_tables(master);
- if (ret)
- goto out_list_del;
- } else {
- /*
- * arm_smmu_write_ctx_desc() relies on the entry being
- * invalid to work, clear any existing entry.
- */
- ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
- NULL);
- if (ret)
- goto out_list_del;
- }
-
- ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
- if (ret)
- goto out_list_del;
+ case ARM_SMMU_DOMAIN_S1: {
+ struct arm_smmu_cd target_cd;
+ arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+ arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+ &target_cd);
arm_smmu_make_cdtable_ste(&target, master);
arm_smmu_install_ste_for_dev(master, &target);
break;
+ }
case ARM_SMMU_DOMAIN_S2:
arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
arm_smmu_install_ste_for_dev(master, &target);
- if (master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
- NULL);
+ arm_smmu_clear_cd(master, IOMMU_NO_PASID);
break;
}
arm_smmu_enable_ats(master, smmu_domain);
- goto out_unlock;
-
-out_list_del:
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del_init(&master->domain_head);
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-
-out_unlock:
mutex_unlock(&arm_smmu_asid_lock);
- return ret;
+ return 0;
}
static int arm_smmu_attach_dev_ste(struct device *dev,
@@ -2636,8 +2671,7 @@ static int arm_smmu_attach_dev_ste(struct device *dev,
* arm_smmu_domain->devices to avoid races updating the same context
* descriptor from arm_smmu_share_asid().
*/
- if (master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
+ arm_smmu_clear_cd(master, IOMMU_NO_PASID);
return 0;
}
@@ -2915,10 +2949,10 @@ static void arm_smmu_release_device(struct device *dev)
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
/* Put the STE back to what arm_smmu_init_strtab() sets */
- if (disable_bypass && !dev->iommu->require_direct)
- arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
- else
+ if (dev->iommu->require_direct)
arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
+ else
+ arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
@@ -3053,14 +3087,9 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
-static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain)
{
- struct iommu_domain *domain;
-
- domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
- if (WARN_ON(IS_ERR(domain)) || !domain)
- return;
-
arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
}
@@ -3273,7 +3302,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents);
+ arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
return 0;
}
@@ -3503,7 +3532,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
return ret;
}
-static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
int ret;
u32 reg, enables;
@@ -3513,7 +3542,6 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
if (reg & CR0_SMMUEN) {
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
- WARN_ON(is_kdump_kernel() && !disable_bypass);
arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
}
@@ -3620,14 +3648,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
if (is_kdump_kernel())
enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
- /* Enable the SMMU interface, or ensure bypass */
- if (!bypass || disable_bypass) {
- enables |= CR0_SMMUEN;
- } else {
- ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
- if (ret)
- return ret;
- }
+ /* Enable the SMMU interface */
+ enables |= CR0_SMMUEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
if (ret) {
@@ -4019,7 +4041,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- bool bypass;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu)
@@ -4030,12 +4051,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
ret = arm_smmu_device_dt_probe(pdev, smmu);
} else {
ret = arm_smmu_device_acpi_probe(pdev, smmu);
- if (ret == -ENODEV)
- return ret;
}
-
- /* Set bypass mode according to firmware probing result */
- bypass = !!ret;
+ if (ret)
+ return ret;
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -4099,7 +4117,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
arm_smmu_rmr_install_bypass_ste(smmu);
/* Reset the device */
- ret = arm_smmu_device_reset(smmu, bypass);
+ ret = arm_smmu_device_reset(smmu);
if (ret)
return ret;
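
The rework above routes both STE and CD programming through arm_smmu_write_entry(), which picks between a hitless single-qword update and a break-before-make sequence based on arm_smmu_entry_qword_diff(): one mask bit per qword whose used bits still differ after all unused bits have been rewritten. A userspace sketch of that diff computation, using hypothetical 2-qword entries:

#include <stdint.h>
#include <stdio.h>

static uint8_t qword_diff(const uint64_t *entry, const uint64_t *target,
                          const uint64_t *cur_used, const uint64_t *tgt_used,
                          uint64_t *unused_update, unsigned int n)
{
        uint8_t diff = 0;

        for (unsigned int i = 0; i < n; i++) {
                /* Flip unused bits to their target values up front */
                unused_update[i] = (entry[i] & cur_used[i]) |
                                   (target[i] & ~cur_used[i]);
                /* Does this qword still need a used-bit change? */
                if ((unused_update[i] & tgt_used[i]) != target[i])
                        diff |= 1 << i;
        }
        return diff;
}

int main(void)
{
        uint64_t entry[2]  = { 0x5, 0x0 };
        uint64_t target[2] = { 0x3, 0x0 };
        uint64_t used[2]   = { 0xF, 0x0 };      /* both configs use qword 0 only */
        uint64_t tmp[2];

        /* Prints 0x1: one differing qword, so the update can be hitless */
        printf("diff mask: %#x\n",
               qword_diff(entry, target, used, used, tmp, 2));
        return 0;
}
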
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 2a19bb63e5..1242a086c9 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -275,14 +275,18 @@ struct arm_smmu_ste {
* 2lvl: at most 1024 L1 entries,
* 1024 lazy entries per table.
*/
-#define CTXDESC_SPLIT 10
-#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT)
+#define CTXDESC_L2_ENTRIES 1024
#define CTXDESC_L1_DESC_DWORDS 1
#define CTXDESC_L1_DESC_V (1UL << 0)
#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
#define CTXDESC_CD_DWORDS 8
+
+struct arm_smmu_cd {
+ __le64 data[CTXDESC_CD_DWORDS];
+};
+
#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
@@ -583,16 +587,13 @@ struct arm_smmu_strtab_l1_desc {
struct arm_smmu_ctx_desc {
u16 asid;
- u64 ttbr;
- u64 tcr;
- u64 mair;
refcount_t refs;
struct mm_struct *mm;
};
struct arm_smmu_l1_ctx_desc {
- __le64 *l2ptr;
+ struct arm_smmu_cd *l2ptr;
dma_addr_t l2ptr_dma;
};
@@ -604,8 +605,6 @@ struct arm_smmu_ctx_desc_cfg {
u8 s1fmt;
/* log2 of the maximum number of CDs supported by this table */
u8 s1cdmax;
- /* Whether CD entries in this table have the stall bit set. */
- u8 stall_enabled:1;
};
struct arm_smmu_s2_cfg {
@@ -737,6 +736,36 @@ struct arm_smmu_domain {
struct list_head mmu_notifiers;
};
+/* The following are exposed for testing purposes. */
+struct arm_smmu_entry_writer_ops;
+struct arm_smmu_entry_writer {
+ const struct arm_smmu_entry_writer_ops *ops;
+ struct arm_smmu_master *master;
+};
+
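+/*
+ * Callbacks for the hitless STE/CD update sequence: get_used reports which
+ * bits of an entry the HW actually reads, sync flushes the entry to the HW.
+ */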
+struct arm_smmu_entry_writer_ops {
+ void (*get_used)(const __le64 *entry, __le64 *used);
+ void (*sync)(struct arm_smmu_entry_writer *writer);
+};
+
+#if IS_ENABLED(CONFIG_KUNIT)
+void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
+void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
+ const __le64 *target);
+void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);
+void arm_smmu_make_abort_ste(struct arm_smmu_ste *target);
+void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target);
+void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master);
+void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain);
+void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master, struct mm_struct *mm,
+ u16 asid);
+#endif
+
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -744,10 +773,19 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
-extern struct arm_smmu_ctx_desc quiet_cd;
-int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
- struct arm_smmu_ctx_desc *cd);
+void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid);
+struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
+ u32 ssid);
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain);
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+ struct arm_smmu_cd *cdptr,
+ const struct arm_smmu_cd *target);
+
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
size_t granule, bool leaf,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index bb89d49adf..482c40aa02 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -1,15 +1,66 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/ratelimit.h>
+#include <linux/spinlock.h>
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
+#define TBU_DBG_TIMEOUT_US 100
+#define DEBUG_AXUSER_REG 0x30
+#define DEBUG_AXUSER_CDMID GENMASK_ULL(43, 36)
+#define DEBUG_AXUSER_CDMID_VAL 0xff
+#define DEBUG_PAR_REG 0x28
+#define DEBUG_PAR_FAULT_VAL BIT(0)
+#define DEBUG_PAR_PA GENMASK_ULL(47, 12)
+#define DEBUG_SID_HALT_REG 0x0
+#define DEBUG_SID_HALT_VAL BIT(16)
+#define DEBUG_SID_HALT_SID GENMASK(9, 0)
+#define DEBUG_SR_HALT_ACK_REG 0x20
+#define DEBUG_SR_HALT_ACK_VAL BIT(1)
+#define DEBUG_SR_ECATS_RUNNING_VAL BIT(0)
+#define DEBUG_TXN_AXCACHE GENMASK(5, 2)
+#define DEBUG_TXN_AXPROT GENMASK(8, 6)
+#define DEBUG_TXN_AXPROT_PRIV 0x1
+#define DEBUG_TXN_AXPROT_NSEC 0x2
+#define DEBUG_TXN_TRIGG_REG 0x18
+#define DEBUG_TXN_TRIGGER BIT(0)
+#define DEBUG_VA_ADDR_REG 0x8
+
+static LIST_HEAD(tbu_list);
+static DEFINE_MUTEX(tbu_list_lock);
+static DEFINE_SPINLOCK(atos_lock);
+
+struct qcom_tbu {
+ struct device *dev;
+ struct device_node *smmu_np;
+ u32 sid_range[2];
+ struct list_head list;
+ struct clk *clk;
+ struct icc_path *path;
+ void __iomem *base;
+ spinlock_t halt_lock; /* serialises halt and resume; they must not run concurrently */
+ int halt_count;
+};
+
+static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
+{
+ return container_of(smmu, struct qcom_smmu, smmu);
+}
+
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
{
int ret;
@@ -49,3 +100,433 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
tbu_pwr_status, sync_inv_ack, sync_inv_progress);
}
}
+
+static struct qcom_tbu *qcom_find_tbu(struct qcom_smmu *qsmmu, u32 sid)
+{
+ struct qcom_tbu *tbu;
+ u32 start, end;
+
+ guard(mutex)(&tbu_list_lock);
+
+ if (list_empty(&tbu_list))
+ return NULL;
+
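+ /* A TBU matches if it belongs to this SMMU and the SID falls in its stream-ID range */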
+ list_for_each_entry(tbu, &tbu_list, list) {
+ start = tbu->sid_range[0];
+ end = start + tbu->sid_range[1];
+
+ if (qsmmu->smmu.dev->of_node == tbu->smmu_np &&
+ start <= sid && sid < end)
+ return tbu;
+ }
+ dev_err(qsmmu->smmu.dev, "Unable to find TBU for sid 0x%x\n", sid);
+
+ return NULL;
+}
+
+static int qcom_tbu_halt(struct qcom_tbu *tbu, struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int ret = 0, idx = smmu_domain->cfg.cbndx;
+ u32 val, fsr, status;
+
+ guard(spinlock_irqsave)(&tbu->halt_lock);
+ if (tbu->halt_count) {
+ tbu->halt_count++;
+ return ret;
+ }
+
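+ /* Set the halt-request bit; the hardware ack is polled for below */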
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val |= DEBUG_SID_HALT_VAL;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if ((fsr & ARM_SMMU_FSR_FAULT) && (fsr & ARM_SMMU_FSR_SS)) {
+ u32 sctlr_orig, sctlr;
+
+ /*
+ * We are in a fault. Our request to halt the bus will not
+ * complete until transactions in front of us (such as the fault
+ * itself) have completed. Disable iommu faults and terminate
+ * any existing transactions.
+ */
+ sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
+ }
+
+ if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG, status,
+ (status & DEBUG_SR_HALT_ACK_VAL),
+ 0, TBU_DBG_TIMEOUT_US)) {
+ dev_err(tbu->dev, "Timeout while trying to halt TBU!\n");
+ ret = -ETIMEDOUT;
+
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_VAL;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ return ret;
+ }
+
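+ /* Halt acked: start the refcount so nested callers take the fast path above */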
+ tbu->halt_count = 1;
+
+ return ret;
+}
+
+static void qcom_tbu_resume(struct qcom_tbu *tbu)
+{
+ u32 val;
+
+ guard(spinlock_irqsave)(&tbu->halt_lock);
+ if (!tbu->halt_count) {
+ WARN(1, "%s: halt_count is 0", dev_name(tbu->dev));
+ return;
+ }
+
+ if (tbu->halt_count > 1) {
+ tbu->halt_count--;
+ return;
+ }
+
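+ /* Last holder: clear the halt bit so the TBU resumes servicing transactions */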
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_VAL;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ tbu->halt_count = 0;
+}
+
+static phys_addr_t qcom_tbu_trigger_atos(struct arm_smmu_domain *smmu_domain,
+ struct qcom_tbu *tbu, dma_addr_t iova, u32 sid)
+{
+ bool atos_timedout = false;
+ phys_addr_t phys = 0;
+ ktime_t timeout;
+ u64 val;
+
+ /* Set address and stream-id */
+ val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_SID;
+ val |= FIELD_PREP(DEBUG_SID_HALT_SID, sid);
+ writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+ writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
+ val = FIELD_PREP(DEBUG_AXUSER_CDMID, DEBUG_AXUSER_CDMID_VAL);
+ writeq_relaxed(val, tbu->base + DEBUG_AXUSER_REG);
+
+ /* Write-back read and write-allocate */
+ val = FIELD_PREP(DEBUG_TXN_AXCACHE, 0xf);
+
+ /* Non-secure access */
+ val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_NSEC);
+
+ /* Privileged access */
+ val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_PRIV);
+
+ val |= DEBUG_TXN_TRIGGER;
+ writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
+
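+ /* Poll until ECATS stops running, reports a fault, or the debug timeout expires */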
+ timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
+ for (;;) {
+ val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+ if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
+ break;
+ val = readl_relaxed(tbu->base + DEBUG_PAR_REG);
+ if (val & DEBUG_PAR_FAULT_VAL)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ atos_timedout = true;
+ break;
+ }
+ }
+
+ val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
+ if (val & DEBUG_PAR_FAULT_VAL)
+ dev_err(tbu->dev, "ATOS generated a fault interrupt! PAR = %llx, SID=0x%x\n",
+ val, sid);
+ else if (atos_timedout)
+ dev_err_ratelimited(tbu->dev, "ATOS translation timed out!\n");
+ else
+ phys = FIELD_GET(DEBUG_PAR_PA, val);
+
+ /* Reset hardware */
+ writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
+ writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
+ val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+ val &= ~DEBUG_SID_HALT_SID;
+ writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
+
+ return phys;
+}
+
+static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
+ dma_addr_t iova, u32 sid)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ int idx = smmu_domain->cfg.cbndx;
+ struct qcom_tbu *tbu;
+ u32 sctlr_orig, sctlr;
+ phys_addr_t phys = 0;
+ int attempt = 0;
+ int ret;
+ u64 fsr;
+
+ tbu = qcom_find_tbu(qsmmu, sid);
+ if (!tbu)
+ return 0;
+
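+ /* Vote for maximum peak bandwidth on the TBU path while the debug lookup runs */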
+ ret = icc_set_bw(tbu->path, 0, UINT_MAX);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(tbu->clk);
+ if (ret)
+ goto disable_icc;
+
+ ret = qcom_tbu_halt(tbu, smmu_domain);
+ if (ret)
+ goto disable_clk;
+
+ /*
+ * ATOS/ECATS can trigger the fault interrupt, so disable it temporarily
+ * and check for an interrupt manually.
+ */
+ sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if (fsr & ARM_SMMU_FSR_FAULT) {
+ /* Clear pending interrupts */
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+
+ /*
+ * TBU halt takes care of resuming any stalled transaction.
+ * Kept here for the sake of completeness.
+ */
+ if (fsr & ARM_SMMU_FSR_SS)
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ARM_SMMU_RESUME_TERMINATE);
+ }
+
+ /* Only one ATOS operation may run at a time */
+ scoped_guard(spinlock_irqsave, &atos_lock) {
+ /*
+ * If the translation fails, retry the lookup up to two more times.
+ */
+ do {
+ phys = qcom_tbu_trigger_atos(smmu_domain, tbu, iova, sid);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if (fsr & ARM_SMMU_FSR_FAULT) {
+ /* Clear pending interrupts */
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+
+ if (fsr & ARM_SMMU_FSR_SS)
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ARM_SMMU_RESUME_TERMINATE);
+ }
+ } while (!phys && attempt++ < 2);
+
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
+ }
+ qcom_tbu_resume(tbu);
+
+ /* Read back to complete prior write transactions */
+ readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+
+disable_clk:
+ clk_disable_unprepare(tbu->clk);
+disable_icc:
+ icc_set_bw(tbu->path, 0, 0);
+
+ return phys;
+}
+
+static phys_addr_t qcom_smmu_iova_to_phys_hard(struct arm_smmu_domain *smmu_domain, dma_addr_t iova)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int idx = smmu_domain->cfg.cbndx;
+ u32 frsynra;
+ u16 sid;
+
+ frsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
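+ /* CBFRSYNRA records the stream ID of the client that caused the fault */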
+ sid = FIELD_GET(ARM_SMMU_CBFRSYNRA_SID, frsynra);
+
+ return qcom_iova_to_phys(smmu_domain, iova, sid);
+}
+
+static phys_addr_t qcom_smmu_verify_fault(struct arm_smmu_domain *smmu_domain, dma_addr_t iova, u32 fsr)
+{
+ struct io_pgtable *iop = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ phys_addr_t phys_post_tlbiall;
+ phys_addr_t phys;
+
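+ /* Translate via ECATS before and after a full TLB invalidate to spot stale TLB entries */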
+ phys = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);
+ io_pgtable_tlb_flush_all(iop);
+ phys_post_tlbiall = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);
+
+ if (phys != phys_post_tlbiall) {
+ dev_err(smmu->dev,
+ "ATOS results differed across TLBIALL... (before: %pa after: %pa)\n",
+ &phys, &phys_post_tlbiall);
+ }
+
+ return (phys == 0 ? phys_post_tlbiall : phys);
+}
+
+irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
+{
+ struct arm_smmu_domain *smmu_domain = dev;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ u32 fsr, fsynr, cbfrsynra, resume = 0;
+ int idx = smmu_domain->cfg.cbndx;
+ phys_addr_t phys_soft;
+ unsigned long iova;
+ int ret, tmp;
+
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
+ iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
+ cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
+
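+ /* No TBUs registered: report the fault without attempting the ATOS lookup */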
+ if (list_empty(&tbu_list)) {
+ ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
+ fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
+
+ if (ret == -ENOSYS)
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+ return IRQ_HANDLED;
+ }
+
+ phys_soft = ops->iova_to_phys(ops, iova);
+
+ tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova,
+ fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
+ if (!tmp || tmp == -EBUSY) {
+ dev_dbg(smmu->dev,
+ "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
+ iova, fsr, fsynr, idx);
+ dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
+ ret = IRQ_HANDLED;
+ resume = ARM_SMMU_RESUME_TERMINATE;
+ } else {
+ phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr);
+
+ if (__ratelimit(&_rs)) {
+ dev_err(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+ dev_err(smmu->dev,
+ "FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n",
+ fsr,
+ (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x04) ? "AFF " : "",
+ (fsr & 0x08) ? "PF " : "",
+ (fsr & 0x10) ? "EF " : "",
+ (fsr & 0x20) ? "TLBMCF " : "",
+ (fsr & 0x40) ? "TLBLKF " : "",
+ (fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x40000000) ? "SS " : "",
+ (fsr & 0x80000000) ? "MULTI " : "",
+ cbfrsynra);
+
+ dev_err(smmu->dev,
+ "soft iova-to-phys=%pa\n", &phys_soft);
+ if (!phys_soft)
+ dev_err(smmu->dev,
+ "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
+ dev_name(smmu->dev));
+ if (phys_atos)
+ dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
+ &phys_atos);
+ else
+ dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
+ }
+ ret = IRQ_NONE;
+ resume = ARM_SMMU_RESUME_TERMINATE;
+ }
+
+ /*
+ * If the client returns -EBUSY, do not clear FSR and do not RESUME
+ * if stalled. This is required to keep the IOMMU client stalled on
+ * the outstanding fault. This gives the client a chance to take any
+ * debug action and then terminate the stalled transaction.
+ * So, the sequence in case of stall on fault should be:
+ * 1) Do not clear FSR or write to RESUME here
+ * 2) Client takes any debug action
+ * 3) Client terminates the stalled transaction and resumes the IOMMU
+ * 4) Client clears FSR. The FSR should only be cleared after 3) and
+ * not before so that the fault remains outstanding. This ensures
+ * SCTLR.HUPCF has the desired effect if subsequent transactions also
+ * need to be terminated.
+ */
+ if (tmp != -EBUSY) {
+ /* Clear the faulting FSR */
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
+
+ /* Retry or terminate any stalled transactions */
+ if (fsr & ARM_SMMU_FSR_SS)
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume);
+ }
+
+ return ret;
+}
+
+int qcom_tbu_probe(struct platform_device *pdev)
+{
+ struct of_phandle_args args = { .args_count = 2 };
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct qcom_tbu *tbu;
+
+ tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
+ if (!tbu)
+ return -ENOMEM;
+
+ tbu->dev = dev;
+ INIT_LIST_HEAD(&tbu->list);
+ spin_lock_init(&tbu->halt_lock);
+
+ if (of_parse_phandle_with_args(np, "qcom,stream-id-range", "#iommu-cells", 0, &args)) {
+ dev_err(dev, "Cannot parse the 'qcom,stream-id-range' DT property\n");
+ return -EINVAL;
+ }
+
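+ /* The phandle target is the parent SMMU; the two args give the SID range start and length */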
+ tbu->smmu_np = args.np;
+ tbu->sid_range[0] = args.args[0];
+ tbu->sid_range[1] = args.args[1];
+ of_node_put(args.np);
+
+ tbu->base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(tbu->base))
+ return PTR_ERR(tbu->base);
+
+ tbu->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(tbu->clk))
+ return PTR_ERR(tbu->clk);
+
+ tbu->path = devm_of_icc_get(dev, NULL);
+ if (IS_ERR(tbu->path))
+ return PTR_ERR(tbu->path);
+
+ guard(mutex)(&tbu_list_lock);
+ list_add_tail(&tbu->list, &tbu_list);
+
+ return 0;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5c7cfc51b5..13f3e2efb2 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -8,6 +8,8 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
@@ -413,6 +415,10 @@ static const struct arm_smmu_impl qcom_smmu_500_impl = {
.reset = arm_mmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
.tlb_sync = qcom_smmu_tlb_sync,
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+ .context_fault = qcom_smmu_context_fault,
+ .context_fault_needs_threaded_irq = true,
+#endif
};
static const struct arm_smmu_impl sdm845_smmu_500_impl = {
@@ -422,6 +428,10 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
.reset = qcom_sdm845_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
.tlb_sync = qcom_smmu_tlb_sync,
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+ .context_fault = qcom_smmu_context_fault,
+ .context_fault_needs_threaded_irq = true,
+#endif
};
static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
@@ -553,10 +563,47 @@ static struct acpi_platform_list qcom_acpi_platlist[] = {
};
#endif
+static int qcom_smmu_tbu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) {
+ ret = qcom_tbu_probe(pdev);
+ if (ret)
+ return ret;
+ }
+
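+ /* If the TBU sits in a power domain, mark it active and enable runtime PM */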
+ if (dev->pm_domain) {
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smmu_tbu_of_match[] = {
+ { .compatible = "qcom,sc7280-tbu" },
+ { .compatible = "qcom,sdm845-tbu" },
+ { }
+};
+
+static struct platform_driver qcom_smmu_tbu_driver = {
+ .driver = {
+ .name = "qcom_tbu",
+ .of_match_table = qcom_smmu_tbu_of_match,
+ },
+ .probe = qcom_smmu_tbu_probe,
+};
+
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
const struct device_node *np = smmu->dev->of_node;
const struct of_device_id *match;
+ static u8 tbu_registered;
+
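+ /* Register the shared TBU driver only once, regardless of how many SMMU instances probe */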
+ if (!tbu_registered++)
+ platform_driver_register(&qcom_smmu_tbu_driver);
#ifdef CONFIG_ACPI
if (np == NULL) {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index 593910567b..3c134d1a62 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -30,10 +30,14 @@ struct qcom_smmu_match_data {
const struct arm_smmu_impl *adreno_impl;
};
+irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
+
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
+int qcom_tbu_probe(struct platform_device *pdev);
#else
static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
+static inline int qcom_tbu_probe(struct platform_device *pdev) { return -EINVAL; }
#endif
#endif /* _ARM_SMMU_QCOM_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index c572d877b0..87c81f75cf 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -806,8 +806,16 @@ static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
else
context_fault = arm_smmu_context_fault;
- ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
- "arm-smmu-context-fault", smmu_domain);
+ if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
+ ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
+ context_fault,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "arm-smmu-context-fault",
+ smmu_domain);
+ else
+ ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
+ "arm-smmu-context-fault", smmu_domain);
+
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
@@ -859,14 +867,10 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
-static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED) {
- if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
- return NULL;
- }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -1596,7 +1600,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 836ed6799a..4765c6945c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -136,6 +136,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CBAR_VMID GENMASK(7, 0)
#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
+#define ARM_SMMU_CBFRSYNRA_SID GENMASK(15, 0)
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define ARM_SMMU_CBA2R_VMID16 GENMASK(31, 16)
@@ -238,6 +239,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_ATSR 0x8f0
#define ARM_SMMU_ATSR_ACTIVE BIT(0)
+#define ARM_SMMU_RESUME_TERMINATE BIT(0)
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
@@ -436,6 +438,7 @@ struct arm_smmu_impl {
int (*def_domain_type)(struct device *dev);
irqreturn_t (*global_fault)(int irq, void *dev);
irqreturn_t (*context_fault)(int irq, void *dev);
+ bool context_fault_needs_threaded_irq;
int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev, int start);