Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r--   drivers/iommu/amd/iommu.c   193
1 file changed, 92 insertions, 101 deletions
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index fcc987f5d4..4283dd8191 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -64,7 +64,7 @@ LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
const struct iommu_ops amd_iommu_ops;
-const struct iommu_dirty_ops amd_dirty_ops;
+static const struct iommu_dirty_ops amd_dirty_ops;
int amd_iommu_max_glx_val = -1;
@@ -85,6 +85,11 @@ static void detach_device(struct device *dev);
*
****************************************************************************/
+static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
+{
+ return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+}
+
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
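Note: pdom_is_v2_pgtbl_mode() added above gates whether invalidations are tagged as guest (GN) flushes; __domain_flush_pages() further down in this diff is its first user. A minimal sketch of the intended pattern (the wrapper function is illustrative only, not part of the patch):

	/* Sketch: derive the GN flag from the domain's page-table flavour */
	static bool example_wants_guest_invalidation(struct protection_domain *pdom)
	{
		/* v2 (guest) page tables need PASID-tagged (GN=1) invalidations */
		return pdom_is_v2_pgtbl_mode(pdom);
	}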
@@ -551,8 +556,6 @@ static void amd_iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
- dev_iommu_priv_set(dev, NULL);
-
/*
* We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races.
@@ -1124,68 +1127,44 @@ static inline u64 build_inv_address(u64 address, size_t size)
}
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid, int pde)
+ size_t size, u16 domid,
+ ioasid_t pasid, bool gn)
{
u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
+
cmd->data[1] |= domid;
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
+ /* PDE bit - we want to flush everything, not only the PTEs */
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ if (gn) {
+ cmd->data[0] |= pasid;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
- if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
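Reassembled from the hunk above, the unified builder now reads as below. With gn == false it matches the old builder called with pde == 1 (the PDE bit is now set unconditionally); with gn == true it absorbs what build_inv_iommu_pasid() used to do:

	static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
					  size_t size, u16 domid,
					  ioasid_t pasid, bool gn)
	{
		u64 inv_address = build_inv_address(address, size);

		memset(cmd, 0, sizeof(*cmd));

		cmd->data[1] |= domid;
		cmd->data[2] = lower_32_bits(inv_address);
		cmd->data[3] = upper_32_bits(inv_address);
		/* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
		if (gn) {
			cmd->data[0] |= pasid;
			cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
		}
		CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	}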
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
- u64 address, size_t size)
+ u64 address, size_t size,
+ ioasid_t pasid, bool gn)
{
u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
+
cmd->data[0] = devid;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
- CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-}
-
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
- u64 address, bool size)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- address &= ~(0xfffULL);
-
- cmd->data[0] = pasid;
- cmd->data[1] = domid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
- if (size)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
- CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-}
-
-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
- int qdep, u64 address, bool size)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- address &= ~(0xfffULL);
+ if (gn) {
+ cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
+ cmd->data[1] |= (pasid & 0xff) << 16;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
- cmd->data[0] = devid;
- cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
- cmd->data[0] |= (qdep & 0xff) << 24;
- cmd->data[1] = devid;
- cmd->data[1] |= (pasid & 0xff) << 16;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
- cmd->data[3] = upper_32_bits(address);
- if (size)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
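The INVALIDATE_IOTLB_PAGES encoding splits the PASID across two command words: PASID[15:8] shares data[0] with devid and qdep, while PASID[7:0] shares data[1] with devid. A standalone userspace demonstration of that packing, mirroring the shifts in the hunk above (demo code only, not driver code; the PASID value is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pasid = 0xabcd;	/* arbitrary 16-bit example */
		uint32_t data0 = 0, data1 = 0;

		data0 |= ((pasid >> 8) & 0xff) << 16;	/* PASID[15:8] -> data[0] bits 23:16 */
		data1 |= (pasid & 0xff) << 16;		/* PASID[7:0]  -> data[1] bits 23:16 */

		printf("data[0]=0x%08x data[1]=0x%08x\n", data0, data1);
		return 0;
	}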
@@ -1341,7 +1320,7 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
}
@@ -1353,7 +1332,7 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
iommu_completion_wait(iommu);
@@ -1392,7 +1371,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
iommu_completion_wait(iommu);
}
-void iommu_flush_all_caches(struct amd_iommu *iommu)
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{
if (check_feature(FEATURE_IA)) {
amd_iommu_flush_all(iommu);
@@ -1406,8 +1385,8 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
/*
* Command send function for flushing on-device TLB
*/
-static int device_flush_iotlb(struct iommu_dev_data *dev_data,
- u64 address, size_t size)
+static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
+ size_t size, ioasid_t pasid, bool gn)
{
struct amd_iommu *iommu;
struct iommu_cmd cmd;
@@ -1418,7 +1397,8 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
if (!iommu)
return -EINVAL;
- build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
+ size, pasid, gn);
return iommu_queue_command(iommu, &cmd);
}
@@ -1464,8 +1444,11 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
- if (dev_data->ats_enabled)
- ret = device_flush_iotlb(dev_data, 0, ~0UL);
+ if (dev_data->ats_enabled) {
+ /* Invalidate the entire contents of an IOTLB */
+ ret = device_flush_iotlb(dev_data, 0, ~0UL,
+ IOMMU_NO_PASID, false);
+ }
return ret;
}
@@ -1476,13 +1459,18 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
* page. Otherwise it flushes the whole TLB of the IOMMU.
*/
static void __domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+ u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
int ret = 0, i;
+ ioasid_t pasid = IOMMU_NO_PASID;
+ bool gn = false;
+
+ if (pdom_is_v2_pgtbl_mode(domain))
+ gn = true;
- build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (!domain->dev_iommu[i])
@@ -1500,17 +1488,21 @@ static void __domain_flush_pages(struct protection_domain *domain,
if (!dev_data->ats_enabled)
continue;
- ret |= device_flush_iotlb(dev_data, address, size);
+ ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
}
WARN_ON(ret);
}
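Here the GN flag is derived from the domain: v2 page-table domains get PASID-tagged (GN=1) flushes with pasid left at IOMMU_NO_PASID, while legacy domains keep plain per-domain flushes. Since the predicate already returns bool, the two-step assignment in the hunk is equivalent to the one-liner:

	bool gn = pdom_is_v2_pgtbl_mode(domain);	/* equivalent sketch */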
-static void domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
{
if (likely(!amd_iommu_np_cache)) {
- __domain_flush_pages(domain, address, size, pde);
+ __domain_flush_pages(domain, address, size);
+
+ /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+ amd_iommu_domain_flush_complete(domain);
+
return;
}
@@ -1543,16 +1535,20 @@ static void domain_flush_pages(struct protection_domain *domain,
flush_size = 1ul << min_alignment;
- __domain_flush_pages(domain, address, flush_size, pde);
+ __domain_flush_pages(domain, address, flush_size);
address += flush_size;
size -= flush_size;
}
+
+ /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+ amd_iommu_domain_flush_complete(domain);
}
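With the completion wait folded into amd_iommu_domain_flush_pages(), callers shrink to a single call under the domain lock, as the later hunks (do_detach, domain_flush_np_cache, amd_iommu_iotlb_sync) show. The resulting caller pattern, as in domain_flush_np_cache() below:

	spin_lock_irqsave(&domain->lock, flags);
	amd_iommu_domain_flush_pages(domain, iova, size);	/* flush + wait */
	spin_unlock_irqrestore(&domain->lock, flags);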
/* Flush the whole IO/TLB for a given protection domain - including PDE */
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
+static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{
- domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+ amd_iommu_domain_flush_pages(domain, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
@@ -1579,8 +1575,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- domain_flush_pages(domain, iova, size, 1);
- amd_iommu_domain_flush_complete(domain);
+ amd_iommu_domain_flush_pages(domain, iova, size);
spin_unlock_irqrestore(&domain->lock, flags);
}
}
@@ -1858,11 +1853,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
- /* Flush IOTLB */
- amd_iommu_domain_flush_tlb_pde(domain);
-
- /* Wait for the flushes to finish */
- amd_iommu_domain_flush_complete(domain);
+ /* Flush IOTLB and wait for the flushes to finish */
+ amd_iommu_domain_flush_all(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
@@ -1896,15 +1888,6 @@ static int attach_device(struct device *dev,
do_attach(dev_data, domain);
- /*
- * We might boot into a crash-kernel here. The crashed kernel
- * left the caches in the IOMMU dirty. So we have to flush
- * here to evict all dirty stuff.
- */
- amd_iommu_domain_flush_tlb_pde(domain);
-
- amd_iommu_domain_flush_complete(domain);
-
out:
spin_unlock(&dev_data->lock);
@@ -2048,8 +2031,7 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_tlb_pde(domain);
- amd_iommu_domain_flush_complete(domain);
+ amd_iommu_domain_flush_all(domain);
}
/*****************************************************************************
@@ -2482,10 +2464,9 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
- if (domain_flush) {
- amd_iommu_domain_flush_tlb_pde(pdomain);
- amd_iommu_domain_flush_complete(pdomain);
- }
+ if (domain_flush)
+ amd_iommu_domain_flush_all(pdomain);
+
pdomain->dirty_tracking = enable;
spin_unlock_irqrestore(&pdomain->lock, flags);
@@ -2588,8 +2569,7 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- amd_iommu_domain_flush_tlb_pde(dom);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_all(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2600,8 +2580,8 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_pages(dom, gather->start,
+ gather->end - gather->start + 1);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2635,7 +2615,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
}
-const struct iommu_dirty_ops amd_dirty_ops = {
+static const struct iommu_dirty_ops amd_dirty_ops = {
.set_dirty_tracking = amd_iommu_set_dirty_tracking,
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
@@ -2666,7 +2646,7 @@ const struct iommu_ops amd_iommu_ops = {
};
static int __flush_pasid(struct protection_domain *domain, u32 pasid,
- u64 address, bool size)
+ u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
@@ -2675,7 +2655,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
if (!(domain->flags & PD_IOMMUV2_MASK))
return -EINVAL;
- build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
/*
* IOMMU TLB needs to be flushed before Device TLB to
@@ -2709,8 +2689,8 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
iommu = rlookup_amd_iommu(dev_data->dev);
if (!iommu)
continue;
- build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
- qdep, address, size);
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
+ address, size, pasid, true);
ret = iommu_queue_command(iommu, &cmd);
if (ret != 0)
@@ -2730,7 +2710,7 @@ out:
static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
u64 address)
{
- return __flush_pasid(domain, pasid, address, false);
+ return __flush_pasid(domain, pasid, address, PAGE_SIZE);
}
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
@@ -2749,8 +2729,7 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
- return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- true);
+ return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
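__flush_pasid() now takes a byte count (size_t) instead of a bool that directly toggled the SIZE bit; build_inv_address() derives the address/size encoding from the count. The two call patterns, as converted in this diff:

	/* flush a single page of a PASID's address space */
	__flush_pasid(domain, pasid, address, PAGE_SIZE);

	/* flush the PASID's entire address space */
	__flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);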
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
@@ -3111,8 +3090,8 @@ out:
return index;
}
-static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
- struct irte_ga *irte)
+static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte)
{
struct irq_remap_table *table;
struct irte_ga *entry;
@@ -3139,6 +3118,18 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
raw_spin_unlock_irqrestore(&table->lock, flags);
+ return 0;
+}
+
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte)
+{
+ int ret;
+
+ ret = __modify_irte_ga(iommu, devid, index, irte);
+ if (ret)
+ return ret;
+
iommu_flush_irt_and_complete(iommu, devid);
return 0;
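The split separates the IRTE update from the interrupt-remapping-table flush: modify_irte_ga() keeps the old update-then-flush behaviour, while __modify_irte_ga() updates the entry in place without flushing. The final hunk below switches amd_iommu_update_ga() over to the latter, avoiding an IRT flush on that frequently executed update path:

	/* normal path: update the IRTE, then flush the IRT and wait */
	modify_irte_ga(iommu, devid, index, irte);

	/* hot path (amd_iommu_update_ga): update in place, skip the flush */
	__modify_irte_ga(iommu, devid, index, irte);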
@@ -3357,7 +3348,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
data->irq_2_irte.devid = devid;
data->irq_2_irte.index = index + sub_handle;
- iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
+ iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
apic->dest_mode_logical, irq_cfg->vector,
irq_cfg->dest_apicid, devid);
@@ -3634,7 +3625,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->dest_mode_logical;
- entry->lo.fields_remap.int_type = apic->delivery_mode;
+ entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED;
entry->hi.fields.vector = cfg->vector;
entry->lo.fields_remap.destination =
APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
@@ -3822,8 +3813,8 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
}
entry->lo.fields_vapic.is_run = is_run;
- return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
- ir_data->irq_2_irte.index, entry);
+ return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif