author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit    34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree      62db60558cbf089714b48daeabca82bf2b20b20e /drivers/iommu/arm
parent    Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/iommu/arm')
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c |  22
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c     | 811
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h     |   6
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c      |   1
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c           |   3
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c         |   3
6 files changed, 534 insertions(+), 312 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 4a27fbdb2d..2cd433a9c8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include "arm-smmu-v3.h"
-#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"
struct arm_smmu_mmu_notifier {
@@ -364,7 +363,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_domain *smmu_domain;
+
+ if (!(domain->type & __IOMMU_DOMAIN_PAGING))
+ return -ENODEV;
+ smmu_domain = to_smmu_domain(domain);
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+ return -ENODEV;
if (!master || !master->sva_enabled)
return -ENODEV;
@@ -470,7 +475,6 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
- int ret;
struct device *dev = master->dev;
/*
@@ -483,16 +487,7 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return -EINVAL;
- ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
- if (ret)
- return ret;
-
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret) {
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- return ret;
- }
- return 0;
+ return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}
static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
@@ -502,7 +497,6 @@ static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return;
- iommu_unregister_device_fault_handler(dev);
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}
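
The new checks at the top of __arm_smmu_sva_bind() matter because, after this series, identity and blocked attachments no longer carry an arm_smmu_domain container, so to_smmu_domain() is only safe once the domain is known to be a stage-1 paging domain. Below is a minimal userspace model of that guard (hypothetical names and bit values, not the kernel code):

#include <stdio.h>

/* Stand-ins for kernel definitions (include/linux/iommu.h). */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)	/* illustrative bit value */
#define ENODEV			19

enum arm_smmu_domain_stage { ARM_SMMU_DOMAIN_S1, ARM_SMMU_DOMAIN_S2 };

struct iommu_domain { unsigned int type; };
struct arm_smmu_domain {
	struct iommu_domain domain;	/* first member, so the cast below is safe */
	enum arm_smmu_domain_stage stage;
};

/* SVA can only bind on top of a stage-1 paging domain; both checks must
 * pass before the arm_smmu_domain container may be dereferenced. */
static int sva_bind_precheck(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain;

	if (!(domain->type & __IOMMU_DOMAIN_PAGING))
		return -ENODEV;		/* identity/blocked domain: no CD table */
	smmu_domain = (struct arm_smmu_domain *)domain;	/* container_of() in the kernel */
	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return -ENODEV;		/* stage-2 domain: no stage-1 CDs either */
	return 0;
}

int main(void)
{
	struct arm_smmu_domain s1 = { { __IOMMU_DOMAIN_PAGING }, ARM_SMMU_DOMAIN_S1 };
	struct iommu_domain identity = { 0 };

	printf("s1=%d identity=%d\n",
	       sva_bind_precheck(&s1.domain), sva_bind_precheck(&identity));
	return 0;
}
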
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f3f2e47b6d..3afec8714c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -29,7 +29,6 @@
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
-#include "../../iommu-sva.h"
static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
@@ -48,6 +47,9 @@ enum arm_smmu_msi_index {
ARM_SMMU_MAX_MSIS,
};
+static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
+ ioasid_t sid);
+
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
[EVTQ_MSI_INDEX] = {
ARM_SMMU_EVTQ_IRQ_CFG0,
@@ -86,6 +88,9 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu);
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -921,31 +926,29 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
}
-static int arm_smmu_page_response(struct device *dev,
- struct iommu_fault_event *unused,
- struct iommu_page_response *resp)
+static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
+ struct iommu_page_response *resp)
{
struct arm_smmu_cmdq_ent cmd = {0};
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
int sid = master->streams[0].id;
- if (master->stall_enabled) {
- cmd.opcode = CMDQ_OP_RESUME;
- cmd.resume.sid = sid;
- cmd.resume.stag = resp->grpid;
- switch (resp->code) {
- case IOMMU_PAGE_RESP_INVALID:
- case IOMMU_PAGE_RESP_FAILURE:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
- break;
- case IOMMU_PAGE_RESP_SUCCESS:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
- break;
- default:
- return -EINVAL;
- }
- } else {
- return -ENODEV;
+ if (WARN_ON(!master->stall_enabled))
+ return;
+
+ cmd.opcode = CMDQ_OP_RESUME;
+ cmd.resume.sid = sid;
+ cmd.resume.stag = resp->grpid;
+ switch (resp->code) {
+ case IOMMU_PAGE_RESP_INVALID:
+ case IOMMU_PAGE_RESP_FAILURE:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
+ break;
+ case IOMMU_PAGE_RESP_SUCCESS:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
+ break;
+ default:
+ break;
}
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
@@ -955,8 +958,6 @@ static int arm_smmu_page_response(struct device *dev,
* terminated... at some point in the future. PRI_RESP is fire and
* forget.
*/
-
- return 0;
}
/* Context descriptor manipulation functions */
@@ -971,6 +972,200 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
+/*
+ * Based on the value of ent, report which bits of the STE the HW will access. It
+ * would be nice if this was complete according to the spec, but minimally it
+ * has to capture the bits this driver uses.
+ */
+static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
+ struct arm_smmu_ste *used_bits)
+{
+ unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
+
+ used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
+ if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
+ return;
+
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
+
+ /* S1 translates */
+ if (cfg & BIT(0)) {
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
+ STRTAB_STE_0_S1CTXPTR_MASK |
+ STRTAB_STE_0_S1CDMAX);
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
+ STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
+ STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
+ STRTAB_STE_1_EATS);
+ used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
+ }
+
+ /* S2 translates */
+ if (cfg & BIT(1)) {
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
+ used_bits->data[2] |=
+ cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
+ STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
+ used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
+ }
+
+ if (cfg == STRTAB_STE_0_CFG_BYPASS)
+ used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
+}
+
+/*
+ * Figure out if we can do a hitless update of entry to become target. Returns a
+ * bit mask where a set bit means that qword must be changed disruptively.
+ * unused_update is an intermediate value of entry that has unused bits set to
+ * their new values.
+ */
+static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target,
+ struct arm_smmu_ste *unused_update)
+{
+ struct arm_smmu_ste target_used = {};
+ struct arm_smmu_ste cur_used = {};
+ u8 used_qword_diff = 0;
+ unsigned int i;
+
+ arm_smmu_get_ste_used(entry, &cur_used);
+ arm_smmu_get_ste_used(target, &target_used);
+
+ for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
+ /*
+ * Check that masks are up to date; the make functions are not
+ * allowed to set a bit to 1 if the used function doesn't say it
+ * is used.
+ */
+ WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
+
+ /* Bits can change because they are not currently being used */
+ unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
+ (target->data[i] & ~cur_used.data[i]);
+ /*
+ * Each bit indicates that a used bit in a qword needs to be
+ * changed after unused_update is applied.
+ */
+ if ((unused_update->data[i] & target_used.data[i]) !=
+ target->data[i])
+ used_qword_diff |= 1 << i;
+ }
+ return used_qword_diff;
+}
+
+static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target, unsigned int start,
+ unsigned int len)
+{
+ bool changed = false;
+ unsigned int i;
+
+ for (i = start; len != 0; len--, i++) {
+ if (entry->data[i] != target->data[i]) {
+ WRITE_ONCE(entry->data[i], target->data[i]);
+ changed = true;
+ }
+ }
+
+ if (changed)
+ arm_smmu_sync_ste_for_sid(smmu, sid);
+ return changed;
+}
+
+/*
+ * Update the STE/CD to the target configuration. The transition from the
+ * current entry to the target entry takes place over multiple steps that
+ * attempt to make the transition hitless if possible. This function takes care
+ * not to create a situation where the HW can perceive a corrupted entry. HW is
+ * only required to provide 64 bit atomicity for stores from the CPU, while
+ * entries are many 64 bit values big.
+ *
+ * The difference between the current value and the target value is analyzed to
+ * determine which of three updates are required - disruptive, hitless or no
+ * change.
+ *
+ * In the most general disruptive case we can make any update in three steps:
+ * - Disrupting the entry (V=0)
+ * - Fill the now unused qwords, except qword 0 which contains V
+ * - Make qword 0 have the final value and valid (V=1) with a single 64
+ * bit store
+ *
+ * However this disrupts the HW while it is happening. There are several
+ * interesting cases where a STE/CD can be updated without disturbing the HW
+ * because only a small number of bits are changing (S1DSS, CONFIG, etc) or
+ * because the used bits don't intersect. We can detect this by calculating how
+ * many 64 bit values need update after adjusting the unused bits and skip the
+ * V=0 process. This relies on the IGNORED behavior described in the
+ * specification.
+ */
+static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target)
+{
+ unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ste unused_update;
+ u8 used_qword_diff;
+
+ used_qword_diff =
+ arm_smmu_entry_qword_diff(entry, target, &unused_update);
+ if (hweight8(used_qword_diff) == 1) {
+ /*
+ * Only one qword needs its used bits to be changed. This is a
+ * hitless update: update all bits the current STE is ignoring
+ * to their new values, then update a single "critical qword" to
+ * change the STE, and finally zero out any bits that are now unused
+ * in the target configuration.
+ */
+ unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
+
+ /*
+ * Skip writing unused bits in the critical qword since we'll be
+ * writing it in the next step anyway. This can save a sync
+ * when the only change is in that qword.
+ */
+ unused_update.data[critical_qword_index] =
+ entry->data[critical_qword_index];
+ entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
+ entry_set(smmu, sid, entry, target, critical_qword_index, 1);
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
+ } else if (used_qword_diff) {
+ /*
+ * At least two qwords need their inuse bits to be changed. This
+ * requires a breaking update: zero the V bit, write all qwords
+ * but 0, then set qword 0.
+ */
+ unused_update.data[0] = entry->data[0] &
+ cpu_to_le64(~STRTAB_STE_0_V);
+ entry_set(smmu, sid, entry, &unused_update, 0, 1);
+ entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
+ entry_set(smmu, sid, entry, target, 0, 1);
+ } else {
+ /*
+ * No inuse bit changed. Sanity check that all unused bits are 0
+ * in the entry. The target was already sanity checked by
+ * arm_smmu_entry_qword_diff().
+ */
+ WARN_ON_ONCE(
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
+ }
+
+ /* It's likely that we'll want to use the new STE soon */
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
+ struct arm_smmu_cmdq_ent
+ prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
+ .prefetch = {
+ .sid = sid,
+ } };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ }
+}
+
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
{
@@ -1251,158 +1446,138 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
-static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
- struct arm_smmu_ste *dst)
+static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
- /*
- * This is hideously complicated, but we only really care about
- * three cases at the moment:
- *
- * 1. Invalid (all zero) -> bypass/fault (init)
- * 2. Bypass/fault -> translation/bypass (attach)
- * 3. Translation/bypass -> bypass/fault (detach)
- *
- * Given that we can't update the STE atomically and the SMMU
- * doesn't read the thing in a defined order, that leaves us
- * with the following maintenance requirements:
- *
- * 1. Update Config, return (init time STEs aren't live)
- * 2. Write everything apart from dword 0, sync, write dword 0, sync
- * 3. Update Config, sync
- */
- u64 val = le64_to_cpu(dst->data[0]);
- bool ste_live = false;
- struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
- struct arm_smmu_s2_cfg *s2_cfg = NULL;
- struct arm_smmu_domain *smmu_domain = master->domain;
- struct arm_smmu_cmdq_ent prefetch_cmd = {
- .opcode = CMDQ_OP_PREFETCH_CFG,
- .prefetch = {
- .sid = sid,
- },
- };
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
+}
- if (smmu_domain) {
- switch (smmu_domain->stage) {
- case ARM_SMMU_DOMAIN_S1:
- cd_table = &master->cd_table;
- break;
- case ARM_SMMU_DOMAIN_S2:
- s2_cfg = &smmu_domain->s2_cfg;
- break;
- default:
- break;
- }
- }
+static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target)
+{
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
- if (val & STRTAB_STE_0_V) {
- switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
- case STRTAB_STE_0_CFG_BYPASS:
- break;
- case STRTAB_STE_0_CFG_S1_TRANS:
- case STRTAB_STE_0_CFG_S2_TRANS:
- ste_live = true;
- break;
- case STRTAB_STE_0_CFG_ABORT:
- BUG_ON(!disable_bypass);
- break;
- default:
- BUG(); /* STE corruption */
- }
- }
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+}
- /* Nuke the existing STE_0 value, as we're going to rewrite it */
- val = STRTAB_STE_0_V;
+static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master)
+{
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
- /* Bypass/fault */
- if (!smmu_domain || !(cd_table || s2_cfg)) {
- if (!smmu_domain && disable_bypass)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
+ (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+ FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
+ ((smmu->features & ARM_SMMU_FEAT_STALLS &&
+ !master->stall_enabled) ?
+ STRTAB_STE_1_S1STALLD :
+ 0) |
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_E2H) {
+ /*
+ * To support BTM the streamworld needs to match the
+ * configuration of the CPU so that the ASID broadcasts are
+ * properly matched. This means either S/NS-EL2-E2H (hypervisor)
+ * or NS-EL1 (guest). Since an SVA domain can be installed in a
+ * PASID this should always use a BTM compatible configuration
+ * if the HW supports it.
+ */
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
+ } else {
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
- dst->data[0] = cpu_to_le64(val);
- dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
- dst->data[2] = 0; /* Nuke the VMID */
/*
- * The SMMU can perform negative caching, so we must sync
- * the STE regardless of whether the old value was live.
+ * VMID 0 is reserved for stage-2 bypass EL1 STEs, see
+ * arm_smmu_domain_finalise_s2()
*/
- if (smmu)
- arm_smmu_sync_ste_for_sid(smmu, sid);
- return;
+ target->data[2] =
+ cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
+}
- if (cd_table) {
- u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
- STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
-
- BUG_ON(ste_live);
- dst->data[1] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
- FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
- FIELD_PREP(STRTAB_STE_1_STRW, strw));
-
- if (smmu->features & ARM_SMMU_FEAT_STALLS &&
- !master->stall_enabled)
- dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
-
- val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
- FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
- FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
- FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
- }
+static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
+ const struct io_pgtable_cfg *pgtbl_cfg =
+ &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
+ &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ u64 vtcr_val;
+ struct arm_smmu_device *smmu = master->smmu;
- if (s2_cfg) {
- BUG_ON(ste_live);
- dst->data[2] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
- FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+
+ vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
+ target->data[2] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
+ STRTAB_STE_2_S2AA64 |
#ifdef __BIG_ENDIAN
- STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2ENDI |
#endif
- STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
- STRTAB_STE_2_S2R);
-
- dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
-
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
- }
-
- if (master->ats_enabled)
- dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
- STRTAB_STE_1_EATS_TRANS));
+ STRTAB_STE_2_S2PTW |
+ STRTAB_STE_2_S2R);
- arm_smmu_sync_ste_for_sid(smmu, sid);
- /* See comment in arm_smmu_write_ctx_desc() */
- WRITE_ONCE(dst->data[0], cpu_to_le64(val));
- arm_smmu_sync_ste_for_sid(smmu, sid);
-
- /* It's likely that we'll want to use the new STE soon */
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
+ STRTAB_STE_3_S2TTB_MASK);
}
-static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
- unsigned int nent, bool force)
+/*
+ * This can safely directly manipulate the STE memory without a sync sequence
+ * because the STE table has not been installed in the SMMU yet.
+ */
+static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *strtab,
+ unsigned int nent)
{
unsigned int i;
- u64 val = STRTAB_STE_0_V;
-
- if (disable_bypass && !force)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- strtab->data[0] = cpu_to_le64(val);
- strtab->data[1] = cpu_to_le64(FIELD_PREP(
- STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
- strtab->data[2] = 0;
+ if (disable_bypass)
+ arm_smmu_make_abort_ste(strtab);
+ else
+ arm_smmu_make_bypass_ste(smmu, strtab);
strtab++;
}
}
@@ -1430,7 +1605,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
+ arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -1460,27 +1635,19 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
/* IRQ and event handlers */
static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
{
- int ret;
- u32 reason;
+ int ret = 0;
u32 perm = 0;
struct arm_smmu_master *master;
bool ssid_valid = evt[0] & EVTQ_0_SSV;
u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
- struct iommu_fault_event fault_evt = { };
+ struct iopf_fault fault_evt = { };
struct iommu_fault *flt = &fault_evt.fault;
switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
case EVT_ID_TRANSLATION_FAULT:
- reason = IOMMU_FAULT_REASON_PTE_FETCH;
- break;
case EVT_ID_ADDR_SIZE_FAULT:
- reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
- break;
case EVT_ID_ACCESS_FAULT:
- reason = IOMMU_FAULT_REASON_ACCESS;
- break;
case EVT_ID_PERMISSION_FAULT:
- reason = IOMMU_FAULT_REASON_PERMISSION;
break;
default:
return -EOPNOTSUPP;
@@ -1490,6 +1657,9 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_S2)
return -EFAULT;
+ if (!(evt[1] & EVTQ_1_STALL))
+ return -EOPNOTSUPP;
+
if (evt[1] & EVTQ_1_RnW)
perm |= IOMMU_FAULT_PERM_READ;
else
@@ -1501,32 +1671,17 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_PnU)
perm |= IOMMU_FAULT_PERM_PRIV;
- if (evt[1] & EVTQ_1_STALL) {
- flt->type = IOMMU_FAULT_PAGE_REQ;
- flt->prm = (struct iommu_fault_page_request) {
- .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
-
- if (ssid_valid) {
- flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
- } else {
- flt->type = IOMMU_FAULT_DMA_UNRECOV;
- flt->event = (struct iommu_fault_unrecoverable) {
- .reason = reason,
- .flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
+ flt->type = IOMMU_FAULT_PAGE_REQ;
+ flt->prm = (struct iommu_fault_page_request) {
+ .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+ .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
- if (ssid_valid) {
- flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
- flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
+ if (ssid_valid) {
+ flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
}
mutex_lock(&smmu->streams_mutex);
@@ -1536,17 +1691,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
goto out_unlock;
}
- ret = iommu_report_device_fault(master->dev, &fault_evt);
- if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
- /* Nobody cared, abort the access */
- struct iommu_page_response resp = {
- .pasid = flt->prm.pasid,
- .grpid = flt->prm.grpid,
- .code = IOMMU_PAGE_RESP_FAILURE,
- };
- arm_smmu_page_response(master->dev, &fault_evt, &resp);
- }
-
+ iommu_report_device_fault(master->dev, &fault_evt);
out_unlock:
mutex_unlock(&smmu->streams_mutex);
return ret;
@@ -2025,15 +2170,15 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
- struct arm_smmu_domain *smmu_domain;
if (type == IOMMU_DOMAIN_SVA)
return arm_smmu_sva_domain_alloc();
+ return ERR_PTR(-EOPNOTSUPP);
+}
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+{
+ struct arm_smmu_domain *smmu_domain;
/*
* Allocate the domain and initialise some of its data structures.
@@ -2042,13 +2187,23 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
+ if (dev) {
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+ if (ret) {
+ kfree(smmu_domain);
+ return ERR_PTR(ret);
+ }
+ }
return &smmu_domain->domain;
}
@@ -2074,12 +2229,12 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
-static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
u32 asid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
@@ -2111,13 +2266,12 @@ out_unlock:
return ret;
}
-static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int vmid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
- typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
/* Reserve VMID 0 for stage-2 bypass STEs */
vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
@@ -2125,35 +2279,21 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
if (vmid < 0)
return vmid;
- vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
cfg->vmid = (u16)vmid;
- cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
return 0;
}
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu)
{
int ret;
unsigned long ias, oas;
enum io_pgtable_fmt fmt;
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
- int (*finalise_stage_fn)(struct arm_smmu_domain *,
- struct io_pgtable_cfg *);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
- return 0;
- }
+ int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg);
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2192,17 +2332,18 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (!pgtbl_ops)
return -ENOMEM;
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
- domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
- domain->geometry.force_aperture = true;
+ smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
+ smmu_domain->domain.geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
}
smmu_domain->pgtbl_ops = pgtbl_ops;
+ smmu_domain->smmu = smmu;
return 0;
}
@@ -2225,7 +2366,8 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
}
}
-static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
+ const struct arm_smmu_ste *target)
{
int i, j;
struct arm_smmu_device *smmu = master->smmu;
@@ -2242,7 +2384,7 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
if (j < i)
continue;
- arm_smmu_write_strtab_ent(master, sid, step);
+ arm_smmu_write_ste(master, sid, step, target);
}
}
@@ -2261,12 +2403,12 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}
-static void arm_smmu_enable_ats(struct arm_smmu_master *master)
+static void arm_smmu_enable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
size_t stu;
struct pci_dev *pdev;
struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_domain *smmu_domain = master->domain;
/* Don't enable ATS at the endpoint if it's not enabled in the STE */
if (!master->ats_enabled)
@@ -2282,10 +2424,9 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
-static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+static void arm_smmu_disable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = master->domain;
-
if (!master->ats_enabled)
return;
@@ -2348,35 +2489,28 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev);
+ struct arm_smmu_domain *smmu_domain;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = master->domain;
- if (!smmu_domain)
+ if (!domain || !(domain->type & __IOMMU_DOMAIN_PAGING))
return;
- arm_smmu_disable_ats(master);
+ smmu_domain = to_smmu_domain(domain);
+ arm_smmu_disable_ats(master, smmu_domain);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- master->domain = NULL;
master->ats_enabled = false;
- arm_smmu_install_ste_for_dev(master);
- /*
- * Clearing the CD entry isn't strictly required to detach the domain
- * since the table is uninstalled anyway, but it helps avoid confusion
- * in the call to arm_smmu_write_ctx_desc on the next attach (which
- * expects the entry to be empty).
- */
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
unsigned long flags;
+ struct arm_smmu_ste target;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2401,10 +2535,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) {
- smmu_domain->smmu = smmu;
- ret = arm_smmu_domain_finalise(domain);
- if (ret)
- smmu_domain->smmu = NULL;
+ ret = arm_smmu_domain_finalise(smmu_domain, smmu);
} else if (smmu_domain->smmu != smmu)
ret = -EINVAL;
@@ -2422,46 +2553,51 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_detach_dev(master);
- master->domain = smmu_domain;
-
- /*
- * The SMMU does not support enabling ATS with bypass. When the STE is
- * in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and
- * Translated transactions are denied as though ATS is disabled for the
- * stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
- * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
- */
- if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
- master->ats_enabled = arm_smmu_ats_supported(master);
+ master->ats_enabled = arm_smmu_ats_supported(master);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_add(&master->domain_head, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
if (!master->cd_table.cdtab) {
ret = arm_smmu_alloc_cd_tables(master);
- if (ret) {
- master->domain = NULL;
+ if (ret)
+ goto out_list_del;
+ } else {
+ /*
+ * arm_smmu_write_ctx_desc() relies on the entry being
+ * invalid to work, clear any existing entry.
+ */
+ ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ if (ret)
goto out_list_del;
- }
}
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
- if (ret) {
- master->domain = NULL;
+ if (ret)
goto out_list_del;
- }
- }
- arm_smmu_install_ste_for_dev(master);
+ arm_smmu_make_cdtable_ste(&target, master);
+ arm_smmu_install_ste_for_dev(master, &target);
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
+ arm_smmu_install_ste_for_dev(master, &target);
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ break;
+ }
- arm_smmu_enable_ats(master);
+ arm_smmu_enable_ats(master, smmu_domain);
goto out_unlock;
out_list_del:
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
out_unlock:
@@ -2469,6 +2605,79 @@ out_unlock:
return ret;
}
+static int arm_smmu_attach_dev_ste(struct device *dev,
+ struct arm_smmu_ste *ste)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
+
+ /*
+ * Do not allow any ASID to be changed while we are working on the STE,
+ * otherwise we could miss invalidations.
+ */
+ mutex_lock(&arm_smmu_asid_lock);
+
+ /*
+ * The SMMU does not support enabling ATS with bypass/abort. When the
+ * STE is in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests
+ * and Translated transactions are denied as though ATS is disabled for
+ * the stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
+ * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ */
+ arm_smmu_detach_dev(master);
+
+ arm_smmu_install_ste_for_dev(master, ste);
+ mutex_unlock(&arm_smmu_asid_lock);
+
+ /*
+ * This has to be done after removing the master from the
+ * arm_smmu_domain->devices to avoid races updating the same context
+ * descriptor from arm_smmu_share_asid().
+ */
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
+ return 0;
+}
+
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ arm_smmu_make_bypass_ste(master->smmu, &ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+ .attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &arm_smmu_identity_ops,
+};
+
+static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_make_abort_ste(&ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_blocked_ops = {
+ .attach_dev = arm_smmu_attach_dev_blocked,
+};
+
+static struct iommu_domain arm_smmu_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &arm_smmu_blocked_ops,
+};
+
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -2662,6 +2871,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
INIT_LIST_HEAD(&master->bonds);
+ INIT_LIST_HEAD(&master->domain_head);
dev_iommu_priv_set(dev, master);
ret = arm_smmu_insert_master(smmu, master);
@@ -2703,7 +2913,13 @@ static void arm_smmu_release_device(struct device *dev)
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- arm_smmu_detach_dev(master);
+
+ /* Put the STE back to what arm_smmu_init_strtab() sets */
+ if (disable_bypass && !dev->iommu->require_direct)
+ arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
+ else
+ arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
+
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
if (master->cd_table.cdtab)
@@ -2743,7 +2959,8 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
}
@@ -2848,8 +3065,11 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
}
static struct iommu_ops arm_smmu_ops = {
+ .identity_domain = &arm_smmu_identity_domain,
+ .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
@@ -3053,7 +3273,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
+ arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents);
return 0;
}
@@ -3129,7 +3349,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
static void arm_smmu_free_msis(void *data)
{
struct device *dev = data;
- platform_msi_domain_free_irqs(dev);
+
+ platform_device_msi_free_irqs_all(dev);
}
static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -3170,7 +3391,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
}
/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
- ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
return;
@@ -3181,7 +3402,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
/* Add callback to free MSIs on teardown */
- devm_add_action(dev, arm_smmu_free_msis, dev);
+ devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
}
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
@@ -3565,6 +3786,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
+ if (reg & IDR1_ATTR_TYPES_OVR)
+ smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR;
+
/* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
@@ -3764,7 +3988,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
- struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;
@@ -3777,8 +4000,12 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
continue;
}
- step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
- arm_smmu_init_bypass_stes(step, 1, true);
+ /*
+ * STE table is not programmed to HW, see
+ * arm_smmu_init_initial_stes()
+ */
+ arm_smmu_make_bypass_ste(smmu,
+ arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}
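
The core of the arm-smmu-v3.c rework is the used-bits/qword-diff machinery behind arm_smmu_write_ste(). The standalone sketch below models its three update cases (no used-bit change, single-qword hitless update, break-before-make). The used-bit rules and STE layout are deliberately simplified stand-ins, and in the kernel every write that changed something is additionally followed by a CFGI_STE sync:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_QWORDS 8
#define V_BIT (1ULL << 0)	/* stands in for STRTAB_STE_0_V */

struct ste { uint64_t data[NUM_QWORDS]; };

/* Toy version of arm_smmu_get_ste_used(): pretend the HW reads only
 * qword 0 while V=0, and qwords 0-1 once V=1. */
static void get_used(const struct ste *ent, struct ste *used)
{
	memset(used, 0, sizeof(*used));
	used->data[0] = V_BIT;
	if (ent->data[0] & V_BIT) {
		used->data[0] = ~0ULL;
		used->data[1] = ~0ULL;
	}
}

/* Mirrors arm_smmu_entry_qword_diff(): bit i of the result is set when
 * a bit the HW is currently using in qword i must change. */
static uint8_t qword_diff(const struct ste *cur, const struct ste *tgt,
			  struct ste *unused_update)
{
	struct ste cur_used, tgt_used;
	uint8_t diff = 0;

	get_used(cur, &cur_used);
	get_used(tgt, &tgt_used);
	for (unsigned int i = 0; i < NUM_QWORDS; i++) {
		/* bits the HW ignores today may be flipped for free */
		unused_update->data[i] =
			(cur->data[i] & cur_used.data[i]) |
			(tgt->data[i] & ~cur_used.data[i]);
		if ((unused_update->data[i] & tgt_used.data[i]) != tgt->data[i])
			diff |= 1u << i;
	}
	return diff;
}

/* Mirrors the three branches of arm_smmu_write_ste(); in the kernel each
 * changed step is followed by a CFGI_STE command and sync. */
static void write_ste(struct ste *entry, const struct ste *tgt)
{
	struct ste unused_update;
	uint8_t diff = qword_diff(entry, tgt, &unused_update);

	if (__builtin_popcount(diff) == 1) {
		unsigned int crit = (unsigned int)__builtin_ctz(diff);

		unused_update.data[crit] = entry->data[crit];
		*entry = unused_update;			/* 1: flip ignored bits */
		entry->data[crit] = tgt->data[crit];	/* 2: one atomic qword */
		*entry = *tgt;				/* 3: clear now-unused bits */
		puts("hitless update");
	} else if (diff) {
		entry->data[0] &= ~V_BIT;		/* break: V=0 first */
		memcpy(&entry->data[1], &tgt->data[1],
		       (NUM_QWORDS - 1) * sizeof(uint64_t));
		entry->data[0] = tgt->data[0];		/* make: qword 0 (V=1) last */
		puts("break-before-make");
	} else {
		*entry = *tgt;		/* kernel WARNs if this changed anything */
		puts("no used bit changed");
	}
}

int main(void)
{
	struct ste cur = { { V_BIT | 0x10, 0xaa } };
	struct ste tgt = { { V_BIT | 0x10, 0xbb } };	/* same qword 0, new qword 1 */

	write_ste(&cur, &tgt);				/* prints "hitless update" */
	return 0;
}
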
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 65fb388d51..2a19bb63e5 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -44,6 +44,7 @@
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
+#define IDR1_ATTR_TYPES_OVR (1 << 27)
#define IDR1_CMDQS GENMASK(25, 21)
#define IDR1_EVTQS GENMASK(20, 16)
#define IDR1_PRIQS GENMASK(15, 11)
@@ -609,8 +610,6 @@ struct arm_smmu_ctx_desc_cfg {
struct arm_smmu_s2_cfg {
u16 vmid;
- u64 vttbr;
- u64 vtcr;
};
struct arm_smmu_strtab_cfg {
@@ -649,6 +648,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
+#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -697,7 +697,6 @@ struct arm_smmu_stream {
struct arm_smmu_master {
struct arm_smmu_device *smmu;
struct device *dev;
- struct arm_smmu_domain *domain;
struct list_head domain_head;
struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */
@@ -715,7 +714,6 @@ struct arm_smmu_master {
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
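
The header change drops the cached vttbr/vtcr from arm_smmu_s2_cfg because arm_smmu_make_s2_domain_ste() now packs VTCR from the live io_pgtable_cfg at STE-build time. A hedged sketch of that FIELD_PREP packing pattern (the masks below are illustrative, not the real STRTAB_STE_2_VTCR_* layout):

#include <stdint.h>
#include <stdio.h>

/* Simplified FIELD_PREP(): shift a value into its mask position. The
 * kernel macro additionally type-checks the value against the mask. */
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Illustrative field masks only; the real STRTAB_STE_2_VTCR_* layout
 * lives in arm-smmu-v3.h. */
#define VTCR_S2T0SZ	0x000000000000003fULL
#define VTCR_S2SL0	0x00000000000000c0ULL
#define VTCR_S2IR0	0x0000000000000300ULL

struct s2_vtcr { unsigned int tsz, sl, irgn; };

/* Pack VTCR on demand from the page-table config instead of caching a
 * pre-packed copy in arm_smmu_s2_cfg. */
static uint64_t make_vtcr(const struct s2_vtcr *vtcr)
{
	return FIELD_PREP(VTCR_S2T0SZ, vtcr->tsz) |
	       FIELD_PREP(VTCR_S2SL0, vtcr->sl) |
	       FIELD_PREP(VTCR_S2IR0, vtcr->irgn);
}

int main(void)
{
	struct s2_vtcr vtcr = { .tsz = 16, .sl = 1, .irgn = 1 };

	printf("vtcr = 0x%llx\n", (unsigned long long)make_vtcr(&vtcr));
	return 0;
}
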
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 8b04ece004..5c7cfc51b5 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -260,6 +260,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sm6375-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
+ { .compatible = "qcom,x1e80100-mdss" },
{ }
};
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 6317aaf7b3..c572d877b0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1546,7 +1546,8 @@ static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
u32 mask, fwid = 0;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 17a1c163fe..e079bb7a99 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -546,7 +546,8 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int qcom_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
struct platform_device *iommu_pdev;
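
The of_xlate hunks in arm-smmu-v3.c, arm-smmu.c and qcom_iommu.c are the same mechanical change: the callback now takes a const struct of_phandle_args *, matching the constified prototype in the iommu core. A minimal sketch of the pattern, with the argument struct abbreviated:

#include <stdint.h>
#include <stdio.h>

/* Abbreviated stand-in for the kernel's struct of_phandle_args. */
struct of_phandle_args {
	int args_count;
	uint32_t args[16];
};

/* The xlate callback only reads the firmware-supplied stream IDs, so the
 * pointer can be const-qualified end to end. */
static int example_of_xlate(const struct of_phandle_args *args)
{
	uint32_t fwid = args->args_count > 0 ? args->args[0] : 0;

	/* the real drivers pass the ID on via iommu_fwspec_add_ids() */
	printf("stream id %u\n", fwid);
	return 0;
}

int main(void)
{
	struct of_phandle_args args = { .args_count = 1, .args = { 0x42 } };

	return example_of_xlate(&args);
}
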