author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
commit    8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree      8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/iommu/amd
parent    Adding debian version 6.7.12-1. (diff)
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/iommu/amd')
-rw-r--r--   drivers/iommu/amd/amd_iommu.h          8
-rw-r--r--   drivers/iommu/amd/amd_iommu_types.h    6
-rw-r--r--   drivers/iommu/amd/init.c               8
-rw-r--r--   drivers/iommu/amd/io_pgtable.c         5
-rw-r--r--   drivers/iommu/amd/io_pgtable_v2.c     10
-rw-r--r--   drivers/iommu/amd/iommu.c            193
6 files changed, 108 insertions, 122 deletions
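
The hunks below fold the PASID-aware command builders (build_inv_iommu_pasid(), build_inv_iotlb_pasid()) into the plain build_inv_iommu_pages() and build_inv_iotlb_pages() helpers, selected at run time by a pasid/gn argument pair, and route the flush call sites through the newly exported amd_iommu_domain_flush_pages(). The standalone sketch that follows only illustrates the bit-packing idea of the unified IOMMU-pages builder; the mask values, the NO_PASID constant and the omitted build_inv_address() range encoding are illustrative placeholders, not the definitions from amd_iommu_types.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative placeholders -- the real masks and the command type codes
 * live in drivers/iommu/amd/amd_iommu_types.h and are not part of this diff. */
#define INV_PAGES_PDE_MASK   0x2u   /* PDE: also invalidate page-directory caches */
#define INV_PAGES_GN_MASK    0x4u   /* GN: PASID (guest/nested) tagged flush       */
#define NO_PASID             0u

struct inv_cmd {
	uint32_t data[4];
};

/* Mirror of the consolidated build_inv_iommu_pages() flow: one helper now
 * covers both the legacy domain-wide flush and the PASID-tagged flush,
 * selected by the 'gn' flag, instead of a separate *_pasid variant.
 * The build_inv_address() size/alignment encoding is skipped here. */
static void build_inv_pages(struct inv_cmd *cmd, uint64_t address,
			    uint16_t domid, uint32_t pasid, bool gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[1] |= domid;
	cmd->data[2]  = (uint32_t)address;
	cmd->data[3]  = (uint32_t)(address >> 32);
	cmd->data[2] |= INV_PAGES_PDE_MASK;   /* flush PTEs and PDEs alike */
	if (gn) {
		cmd->data[0] |= pasid;
		cmd->data[2] |= INV_PAGES_GN_MASK;
	}
}

int main(void)
{
	struct inv_cmd cmd;

	/* Domain-wide flush, no PASID -- the amd_iommu_flush_tlb_domid() case. */
	build_inv_pages(&cmd, 0x7ffffffff000ULL, 0x42, NO_PASID, false);
	printf("data[2] = %#x\n", (unsigned)cmd.data[2]);

	/* PASID-tagged flush -- the case __flush_pasid() now requests with gn=true. */
	build_inv_pages(&cmd, 0x7ffffffff000ULL, 0x42, 5, true);
	printf("data[0] = %#x data[2] = %#x\n",
	       (unsigned)cmd.data[0], (unsigned)cmd.data[2]);
	return 0;
}

Built with a plain cc, it prints the command words for a domain-wide flush and for a PASID-tagged flush, mirroring the two ways __domain_flush_pages() now assembles the command.
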
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 86be1edd50..8b3601f285 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -53,10 +53,16 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size);
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 90b7d7950a..809d74faa1 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -902,12 +902,6 @@ extern int amd_iommu_max_glx_val;
extern u64 amd_iommu_efr;
extern u64 amd_iommu_efr2;
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-void iommu_flush_all_caches(struct amd_iommu *iommu);
-
static inline int get_ioapic_devid(int id)
{
struct devid_map *entry;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7f65f3ecb2..40979b0f52 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2226,7 +2226,7 @@ static int __init amd_iommu_init_pci(void)
init_device_table_dma(pci_seg);
for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
print_iommu_info();
@@ -2776,7 +2776,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
iommu_enable(iommu);
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
}
/*
@@ -2832,7 +2832,7 @@ static void early_enable_iommus(void)
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
iommu_set_device_table(iommu);
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
}
}
}
@@ -3296,7 +3296,7 @@ static int __init state_next(void)
uninit_device_table_dma(pci_seg);
for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
}
}
return ret;
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 6c0621f6f5..2a0d1e97e5 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -369,6 +369,8 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
+ size_t size = pgcount << __ffs(pgsize);
+ unsigned long o_iova = iova;
BUG_ON(!IS_ALIGNED(iova, pgsize));
BUG_ON(!IS_ALIGNED(paddr, pgsize));
@@ -424,8 +426,7 @@ out:
* Updates and flushing already happened in
* increase_address_space().
*/
- amd_iommu_domain_flush_tlb_pde(dom);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_pages(dom, o_iova, size);
spin_unlock_irqrestore(&dom->lock, flags);
}
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index f818a7e254..6d69ba6074 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -244,7 +244,6 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
unsigned long mapped_size = 0;
unsigned long o_iova = iova;
size_t size = pgcount << __ffs(pgsize);
- int count = 0;
int ret = 0;
bool updated = false;
@@ -265,19 +264,14 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
*pte = set_pte_attr(paddr, map_size, prot);
- count++;
iova += map_size;
paddr += map_size;
mapped_size += map_size;
}
out:
- if (updated) {
- if (count > 1)
- amd_iommu_flush_tlb(&pdom->domain, 0);
- else
- amd_iommu_flush_page(&pdom->domain, 0, o_iova);
- }
+ if (updated)
+ amd_iommu_domain_flush_pages(pdom, o_iova, size);
if (mapped)
*mapped += mapped_size;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index fcc987f5d4..4283dd8191 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -64,7 +64,7 @@ LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
const struct iommu_ops amd_iommu_ops;
-const struct iommu_dirty_ops amd_dirty_ops;
+static const struct iommu_dirty_ops amd_dirty_ops;
int amd_iommu_max_glx_val = -1;
@@ -85,6 +85,11 @@ static void detach_device(struct device *dev);
*
****************************************************************************/
+static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
+{
+ return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+}
+
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -551,8 +556,6 @@ static void amd_iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
- dev_iommu_priv_set(dev, NULL);
-
/*
* We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races.
@@ -1124,68 +1127,44 @@ static inline u64 build_inv_address(u64 address, size_t size)
}
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid, int pde)
+ size_t size, u16 domid,
+ ioasid_t pasid, bool gn)
{
u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
+
cmd->data[1] |= domid;
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
+ /* PDE bit - we want to flush everything, not only the PTEs */
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ if (gn) {
+ cmd->data[0] |= pasid;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
- if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
- u64 address, size_t size)
+ u64 address, size_t size,
+ ioasid_t pasid, bool gn)
{
u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
+
cmd->data[0] = devid;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
- CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-}
-
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
- u64 address, bool size)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- address &= ~(0xfffULL);
-
- cmd->data[0] = pasid;
- cmd->data[1] = domid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
- if (size)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
- CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-}
-
-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
- int qdep, u64 address, bool size)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- address &= ~(0xfffULL);
+ if (gn) {
+ cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
+ cmd->data[1] |= (pasid & 0xff) << 16;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
- cmd->data[0] = devid;
- cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
- cmd->data[0] |= (qdep & 0xff) << 24;
- cmd->data[1] = devid;
- cmd->data[1] |= (pasid & 0xff) << 16;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
- cmd->data[3] = upper_32_bits(address);
- if (size)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
@@ -1341,7 +1320,7 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
}
@@ -1353,7 +1332,7 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
iommu_completion_wait(iommu);
@@ -1392,7 +1371,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
iommu_completion_wait(iommu);
}
-void iommu_flush_all_caches(struct amd_iommu *iommu)
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{
if (check_feature(FEATURE_IA)) {
amd_iommu_flush_all(iommu);
@@ -1406,8 +1385,8 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
/*
* Command send function for flushing on-device TLB
*/
-static int device_flush_iotlb(struct iommu_dev_data *dev_data,
- u64 address, size_t size)
+static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
+ size_t size, ioasid_t pasid, bool gn)
{
struct amd_iommu *iommu;
struct iommu_cmd cmd;
@@ -1418,7 +1397,8 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
if (!iommu)
return -EINVAL;
- build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
+ size, pasid, gn);
return iommu_queue_command(iommu, &cmd);
}
@@ -1464,8 +1444,11 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
- if (dev_data->ats_enabled)
- ret = device_flush_iotlb(dev_data, 0, ~0UL);
+ if (dev_data->ats_enabled) {
+ /* Invalidate the entire contents of an IOTLB */
+ ret = device_flush_iotlb(dev_data, 0, ~0UL,
+ IOMMU_NO_PASID, false);
+ }
return ret;
}
@@ -1476,13 +1459,18 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
* page. Otherwise it flushes the whole TLB of the IOMMU.
*/
static void __domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+ u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
int ret = 0, i;
+ ioasid_t pasid = IOMMU_NO_PASID;
+ bool gn = false;
+
+ if (pdom_is_v2_pgtbl_mode(domain))
+ gn = true;
- build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (!domain->dev_iommu[i])
@@ -1500,17 +1488,21 @@ static void __domain_flush_pages(struct protection_domain *domain,
if (!dev_data->ats_enabled)
continue;
- ret |= device_flush_iotlb(dev_data, address, size);
+ ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
}
WARN_ON(ret);
}
-static void domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
{
if (likely(!amd_iommu_np_cache)) {
- __domain_flush_pages(domain, address, size, pde);
+ __domain_flush_pages(domain, address, size);
+
+ /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+ amd_iommu_domain_flush_complete(domain);
+
return;
}
@@ -1543,16 +1535,20 @@ static void domain_flush_pages(struct protection_domain *domain,
flush_size = 1ul << min_alignment;
- __domain_flush_pages(domain, address, flush_size, pde);
+ __domain_flush_pages(domain, address, flush_size);
address += flush_size;
size -= flush_size;
}
+
+ /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+ amd_iommu_domain_flush_complete(domain);
}
/* Flush the whole IO/TLB for a given protection domain - including PDE */
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
+static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{
- domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+ amd_iommu_domain_flush_pages(domain, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
@@ -1579,8 +1575,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- domain_flush_pages(domain, iova, size, 1);
- amd_iommu_domain_flush_complete(domain);
+ amd_iommu_domain_flush_pages(domain, iova, size);
spin_unlock_irqrestore(&domain->lock, flags);
}
}
@@ -1858,11 +1853,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
- /* Flush IOTLB */
- amd_iommu_domain_flush_tlb_pde(domain);
-
- /* Wait for the flushes to finish */
- amd_iommu_domain_flush_complete(domain);
+ /* Flush IOTLB and wait for the flushes to finish */
+ amd_iommu_domain_flush_all(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
@@ -1896,15 +1888,6 @@ static int attach_device(struct device *dev,
do_attach(dev_data, domain);
- /*
- * We might boot into a crash-kernel here. The crashed kernel
- * left the caches in the IOMMU dirty. So we have to flush
- * here to evict all dirty stuff.
- */
- amd_iommu_domain_flush_tlb_pde(domain);
-
- amd_iommu_domain_flush_complete(domain);
-
out:
spin_unlock(&dev_data->lock);
@@ -2048,8 +2031,7 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_tlb_pde(domain);
- amd_iommu_domain_flush_complete(domain);
+ amd_iommu_domain_flush_all(domain);
}
/*****************************************************************************
@@ -2482,10 +2464,9 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
- if (domain_flush) {
- amd_iommu_domain_flush_tlb_pde(pdomain);
- amd_iommu_domain_flush_complete(pdomain);
- }
+ if (domain_flush)
+ amd_iommu_domain_flush_all(pdomain);
+
pdomain->dirty_tracking = enable;
spin_unlock_irqrestore(&pdomain->lock, flags);
@@ -2588,8 +2569,7 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- amd_iommu_domain_flush_tlb_pde(dom);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_all(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2600,8 +2580,8 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_pages(dom, gather->start,
+ gather->end - gather->start + 1);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2635,7 +2615,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
}
-const struct iommu_dirty_ops amd_dirty_ops = {
+static const struct iommu_dirty_ops amd_dirty_ops = {
.set_dirty_tracking = amd_iommu_set_dirty_tracking,
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
@@ -2666,7 +2646,7 @@ const struct iommu_ops amd_iommu_ops = {
};
static int __flush_pasid(struct protection_domain *domain, u32 pasid,
- u64 address, bool size)
+ u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
@@ -2675,7 +2655,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
if (!(domain->flags & PD_IOMMUV2_MASK))
return -EINVAL;
- build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
/*
* IOMMU TLB needs to be flushed before Device TLB to
@@ -2709,8 +2689,8 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
iommu = rlookup_amd_iommu(dev_data->dev);
if (!iommu)
continue;
- build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
- qdep, address, size);
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
+ address, size, pasid, true);
ret = iommu_queue_command(iommu, &cmd);
if (ret != 0)
@@ -2730,7 +2710,7 @@ out:
static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
u64 address)
{
- return __flush_pasid(domain, pasid, address, false);
+ return __flush_pasid(domain, pasid, address, PAGE_SIZE);
}
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
@@ -2749,8 +2729,7 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
- return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- true);
+ return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
@@ -3111,8 +3090,8 @@ out:
return index;
}
-static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
- struct irte_ga *irte)
+static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte)
{
struct irq_remap_table *table;
struct irte_ga *entry;
@@ -3139,6 +3118,18 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
raw_spin_unlock_irqrestore(&table->lock, flags);
+ return 0;
+}
+
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte)
+{
+ bool ret;
+
+ ret = __modify_irte_ga(iommu, devid, index, irte);
+ if (ret)
+ return ret;
+
iommu_flush_irt_and_complete(iommu, devid);
return 0;
@@ -3357,7 +3348,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
data->irq_2_irte.devid = devid;
data->irq_2_irte.index = index + sub_handle;
- iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
+ iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
apic->dest_mode_logical, irq_cfg->vector,
irq_cfg->dest_apicid, devid);
@@ -3634,7 +3625,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->dest_mode_logical;
- entry->lo.fields_remap.int_type = apic->delivery_mode;
+ entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED;
entry->hi.fields.vector = cfg->vector;
entry->lo.fields_remap.destination =
APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
@@ -3822,8 +3813,8 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
}
entry->lo.fields_vapic.is_run = is_run;
- return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
- ir_data->irq_2_irte.index, entry);
+ return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif