Diffstat (limited to 'drivers/iommu/amd')
-rw-r--r--  drivers/iommu/amd/Kconfig            |   44
-rw-r--r--  drivers/iommu/amd/Makefile           |    4
-rw-r--r--  drivers/iommu/amd/amd_iommu.h        |  153
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h  | 1052
-rw-r--r--  drivers/iommu/amd/debugfs.c          |   32
-rw-r--r--  drivers/iommu/amd/init.c             | 3841
-rw-r--r--  drivers/iommu/amd/io_pgtable.c       |  537
-rw-r--r--  drivers/iommu/amd/io_pgtable_v2.c    |  414
-rw-r--r--  drivers/iommu/amd/iommu.c            | 3783
-rw-r--r--  drivers/iommu/amd/iommu_v2.c         |  996
-rw-r--r--  drivers/iommu/amd/quirks.c           |  105
11 files changed, 10961 insertions, 0 deletions
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
new file mode 100644
index 0000000000..9b5fc3356b
--- /dev/null
+++ b/drivers/iommu/amd/Kconfig
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# AMD IOMMU support
+config AMD_IOMMU
+ bool "AMD IOMMU support"
+ select SWIOTLB
+ select PCI_MSI
+ select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
+ select IOMMU_API
+ select IOMMU_IOVA
+ select IOMMU_IO_PGTABLE
+ depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
+ help
+ With this option you can enable support for AMD IOMMU hardware in
+ your system. An IOMMU is a hardware component which provides
+ remapping of DMA memory accesses from devices. With an AMD IOMMU you
+ can isolate the DMA memory of different devices and protect the
+ system from misbehaving device drivers or hardware.
+
+ You can find out whether your system has an AMD IOMMU by checking
+ your BIOS for an option to enable it, or by checking whether an
+ IVRS ACPI table is present.
+
+config AMD_IOMMU_V2
+ tristate "AMD IOMMU Version 2 driver"
+ depends on AMD_IOMMU
+ select MMU_NOTIFIER
+ help
+ This option enables support for the AMD IOMMUv2 features of the IOMMU
+ hardware. Select this option if you want to use devices that support
+ the PCI PRI and PASID interface.
+
+config AMD_IOMMU_DEBUGFS
+ bool "Enable AMD IOMMU internals in DebugFS"
+ depends on AMD_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
+ Exposes AMD IOMMU device internals in DebugFS.
+
+ This option is -NOT- intended for production environments, and should
+ not generally be enabled.
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
new file mode 100644
index 0000000000..773d8aa002
--- /dev/null
+++ b/drivers/iommu/amd/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
+obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
+obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
new file mode 100644
index 0000000000..e2857109e9
--- /dev/null
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#ifndef AMD_IOMMU_H
+#define AMD_IOMMU_H
+
+#include <linux/iommu.h>
+
+#include "amd_iommu_types.h"
+
+irqreturn_t amd_iommu_int_thread(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
+irqreturn_t amd_iommu_int_handler(int irq, void *data);
+void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
+void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+#else
+static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+#endif
+
+/* Needed for interrupt remapping */
+int amd_iommu_prepare(void);
+int amd_iommu_enable(void);
+void amd_iommu_disable(void);
+int amd_iommu_reenable(int mode);
+int amd_iommu_enable_faulting(void);
+extern int amd_iommu_guest_ir;
+extern enum io_pgtable_fmt amd_iommu_pgtable;
+extern int amd_iommu_gpt_level;
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+bool amd_iommu_v2_supported(void);
+struct amd_iommu *get_amd_iommu(unsigned int idx);
+u8 amd_iommu_pc_get_max_banks(unsigned int idx);
+bool amd_iommu_pc_supported(void);
+u8 amd_iommu_pc_get_max_counters(unsigned int idx);
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value);
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value);
+
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+void amd_iommu_domain_update(struct protection_domain *domain);
+void amd_iommu_domain_flush_complete(struct protection_domain *domain);
+void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
+int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
+ unsigned long cr3);
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
+
+#ifdef CONFIG_IRQ_REMAP
+int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+#else
+static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+ return 0;
+}
+#endif
+
+#define PPR_SUCCESS 0x0
+#define PPR_INVALID 0x1
+#define PPR_FAILURE 0xf
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
+ int status, int tag);
+
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
+{
+ return !!(iommu->features & mask);
+}
+
+static inline u64 iommu_virt_to_phys(void *vaddr)
+{
+ return (u64)__sme_set(virt_to_phys(vaddr));
+}
+
+static inline void *iommu_phys_to_virt(unsigned long paddr)
+{
+ return phys_to_virt(__sme_clr(paddr));
+}
+
+static inline
+void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
+{
+ atomic64_set(&domain->iop.pt_root, root);
+ domain->iop.root = (u64 *)(root & PAGE_MASK);
+ domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
+}
+
+static inline
+void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
+{
+ amd_iommu_domain_set_pt_root(domain, 0);
+}
+
+static inline int get_pci_sbdf_id(struct pci_dev *pdev)
+{
+ int seg = pci_domain_nr(pdev->bus);
+ u16 devid = pci_dev_id(pdev);
+
+ return PCI_SEG_DEVID_TO_SBDF(seg, devid);
+}
+
+static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
+{
+ struct page *page;
+
+ page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
+ return page ? page_address(page) : NULL;
+}
+
+bool translation_pre_enabled(struct amd_iommu *iommu);
+bool amd_iommu_is_attach_deferred(struct device *dev);
+int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
+
+#ifdef CONFIG_DMI
+void amd_iommu_apply_ivrs_quirks(void);
+#else
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
+#endif
+
+void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode);
+struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
+
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
+extern bool amd_iommu_snp_en;
+#endif
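A note on the pt_root encoding used by amd_iommu_domain_set_pt_root() above: the page-table root is page aligned, so its low bits are always zero and the same 64-bit value can carry the paging mode in its lowest three bits. A minimal stand-alone sketch (illustrative only; the SKETCH_* names and pack_pt_root() helper are made up here, not part of the driver):

#include <stdint.h>
#include <assert.h>

#define SKETCH_PAGE_MASK (~0xfffULL)

static uint64_t pack_pt_root(uint64_t root_phys, unsigned int mode)
{
	/* Root is 4K aligned, so the low 3 bits are free to carry the mode. */
	return (root_phys & SKETCH_PAGE_MASK) | (mode & 7);
}

int main(void)
{
	uint64_t v = pack_pt_root(0x123456000ULL, 3 /* PAGE_MODE_3_LEVEL */);

	assert((v & SKETCH_PAGE_MASK) == 0x123456000ULL);	/* root pointer */
	assert((v & 7) == 3);					/* paging mode  */
	return 0;
}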
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
new file mode 100644
index 0000000000..7dc30c2b56
--- /dev/null
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -0,0 +1,1052 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ * Leo Duran <leo.duran@amd.com>
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
+#define _ASM_X86_AMD_IOMMU_TYPES_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/msi.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/irqreturn.h>
+#include <linux/io-pgtable.h>
+
+/*
+ * Maximum number of IOMMUs supported
+ */
+#define MAX_IOMMUS 32
+
+/*
+ * some size calculation constants
+ */
+#define DEV_TABLE_ENTRY_SIZE 32
+#define ALIAS_TABLE_ENTRY_SIZE 2
+#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
+
+/* Capability offsets used by the driver */
+#define MMIO_CAP_HDR_OFFSET 0x00
+#define MMIO_RANGE_OFFSET 0x0c
+#define MMIO_MISC_OFFSET 0x10
+
+/* Masks, shifts and macros to parse the device range capability */
+#define MMIO_RANGE_LD_MASK 0xff000000
+#define MMIO_RANGE_FD_MASK 0x00ff0000
+#define MMIO_RANGE_BUS_MASK 0x0000ff00
+#define MMIO_RANGE_LD_SHIFT 24
+#define MMIO_RANGE_FD_SHIFT 16
+#define MMIO_RANGE_BUS_SHIFT 8
+#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
+#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
+#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x) ((x) & 0x1f)
+
+/* Flag masks for the AMD IOMMU exclusion range */
+#define MMIO_EXCL_ENABLE_MASK 0x01ULL
+#define MMIO_EXCL_ALLOW_MASK 0x02ULL
+
+/* Used offsets into the MMIO space */
+#define MMIO_DEV_TABLE_OFFSET 0x0000
+#define MMIO_CMD_BUF_OFFSET 0x0008
+#define MMIO_EVT_BUF_OFFSET 0x0010
+#define MMIO_CONTROL_OFFSET 0x0018
+#define MMIO_EXCL_BASE_OFFSET 0x0020
+#define MMIO_EXCL_LIMIT_OFFSET 0x0028
+#define MMIO_EXT_FEATURES 0x0030
+#define MMIO_PPR_LOG_OFFSET 0x0038
+#define MMIO_GA_LOG_BASE_OFFSET 0x00e0
+#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8
+#define MMIO_MSI_ADDR_LO_OFFSET 0x015C
+#define MMIO_MSI_ADDR_HI_OFFSET 0x0160
+#define MMIO_MSI_DATA_OFFSET 0x0164
+#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
+#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
+#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
+#define MMIO_EXT_FEATURES2 0x01A0
+#define MMIO_CMD_HEAD_OFFSET 0x2000
+#define MMIO_CMD_TAIL_OFFSET 0x2008
+#define MMIO_EVT_HEAD_OFFSET 0x2010
+#define MMIO_EVT_TAIL_OFFSET 0x2018
+#define MMIO_STATUS_OFFSET 0x2020
+#define MMIO_PPR_HEAD_OFFSET 0x2030
+#define MMIO_PPR_TAIL_OFFSET 0x2038
+#define MMIO_GA_HEAD_OFFSET 0x2040
+#define MMIO_GA_TAIL_OFFSET 0x2048
+#define MMIO_CNTR_CONF_OFFSET 0x4000
+#define MMIO_CNTR_REG_OFFSET 0x40000
+#define MMIO_REG_END_OFFSET 0x80000
+
+
+
+/* Extended Feature Bits */
+#define FEATURE_PREFETCH BIT_ULL(0)
+#define FEATURE_PPR BIT_ULL(1)
+#define FEATURE_X2APIC BIT_ULL(2)
+#define FEATURE_NX BIT_ULL(3)
+#define FEATURE_GT BIT_ULL(4)
+#define FEATURE_IA BIT_ULL(6)
+#define FEATURE_GA BIT_ULL(7)
+#define FEATURE_HE BIT_ULL(8)
+#define FEATURE_PC BIT_ULL(9)
+#define FEATURE_GATS_SHIFT (12)
+#define FEATURE_GATS_MASK (3ULL)
+#define FEATURE_GAM_VAPIC BIT_ULL(21)
+#define FEATURE_GIOSUP BIT_ULL(48)
+#define FEATURE_EPHSUP BIT_ULL(50)
+#define FEATURE_SNP BIT_ULL(63)
+
+#define FEATURE_PASID_SHIFT 32
+#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT 14
+#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+/* Extended Feature 2 Bits */
+#define FEATURE_SNPAVICSUP_SHIFT 5
+#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
+#define FEATURE_SNPAVICSUP_GAM(x) \
+ ((x & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT == 0x1)
+
+/* Note:
+ * The current driver only supports 16-bit PASIDs.
+ * Current hardware also only implements up to 16-bit PASIDs,
+ * even though the spec says it could have up to 20 bits.
+ */
+#define PASID_MASK 0x0000ffff
+
+/* MMIO status bits */
+#define MMIO_STATUS_EVT_OVERFLOW_MASK BIT(0)
+#define MMIO_STATUS_EVT_INT_MASK BIT(1)
+#define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2)
+#define MMIO_STATUS_EVT_RUN_MASK BIT(3)
+#define MMIO_STATUS_PPR_OVERFLOW_MASK BIT(5)
+#define MMIO_STATUS_PPR_INT_MASK BIT(6)
+#define MMIO_STATUS_PPR_RUN_MASK BIT(7)
+#define MMIO_STATUS_GALOG_RUN_MASK BIT(8)
+#define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9)
+#define MMIO_STATUS_GALOG_INT_MASK BIT(10)
+
+/* event logging constants */
+#define EVENT_ENTRY_SIZE 0x10
+#define EVENT_TYPE_SHIFT 28
+#define EVENT_TYPE_MASK 0xf
+#define EVENT_TYPE_ILL_DEV 0x1
+#define EVENT_TYPE_IO_FAULT 0x2
+#define EVENT_TYPE_DEV_TAB_ERR 0x3
+#define EVENT_TYPE_PAGE_TAB_ERR 0x4
+#define EVENT_TYPE_ILL_CMD 0x5
+#define EVENT_TYPE_CMD_HARD_ERR 0x6
+#define EVENT_TYPE_IOTLB_INV_TO 0x7
+#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_TYPE_INV_PPR_REQ 0x9
+#define EVENT_TYPE_RMP_FAULT 0xd
+#define EVENT_TYPE_RMP_HW_ERR 0xe
+#define EVENT_DEVID_MASK 0xffff
+#define EVENT_DEVID_SHIFT 0
+#define EVENT_DOMID_MASK_LO 0xffff
+#define EVENT_DOMID_MASK_HI 0xf0000
+#define EVENT_FLAGS_MASK 0xfff
+#define EVENT_FLAGS_SHIFT 0x10
+#define EVENT_FLAG_RW 0x020
+#define EVENT_FLAG_I 0x008
+
+/* feature control bits */
+#define CONTROL_IOMMU_EN 0
+#define CONTROL_HT_TUN_EN 1
+#define CONTROL_EVT_LOG_EN 2
+#define CONTROL_EVT_INT_EN 3
+#define CONTROL_COMWAIT_EN 4
+#define CONTROL_INV_TIMEOUT 5
+#define CONTROL_PASSPW_EN 8
+#define CONTROL_RESPASSPW_EN 9
+#define CONTROL_COHERENT_EN 10
+#define CONTROL_ISOC_EN 11
+#define CONTROL_CMDBUF_EN 12
+#define CONTROL_PPRLOG_EN 13
+#define CONTROL_PPRINT_EN 14
+#define CONTROL_PPR_EN 15
+#define CONTROL_GT_EN 16
+#define CONTROL_GA_EN 17
+#define CONTROL_GAM_EN 25
+#define CONTROL_GALOG_EN 28
+#define CONTROL_GAINT_EN 29
+#define CONTROL_XT_EN 50
+#define CONTROL_INTCAPXT_EN 51
+#define CONTROL_IRTCACHEDIS 59
+#define CONTROL_SNPAVIC_EN 61
+
+#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_NONE 0
+#define CTRL_INV_TO_1MS 1
+#define CTRL_INV_TO_10MS 2
+#define CTRL_INV_TO_100MS 3
+#define CTRL_INV_TO_1S 4
+#define CTRL_INV_TO_10S 5
+#define CTRL_INV_TO_100S 6
+
+/* command specific defines */
+#define CMD_COMPL_WAIT 0x01
+#define CMD_INV_DEV_ENTRY 0x02
+#define CMD_INV_IOMMU_PAGES 0x03
+#define CMD_INV_IOTLB_PAGES 0x04
+#define CMD_INV_IRT 0x05
+#define CMD_COMPLETE_PPR 0x07
+#define CMD_INV_ALL 0x08
+
+#define CMD_COMPL_WAIT_STORE_MASK 0x01
+#define CMD_COMPL_WAIT_INT_MASK 0x02
+#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
+#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK 0x04
+
+#define PPR_STATUS_MASK 0xf
+#define PPR_STATUS_SHIFT 12
+
+#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
+
+/* macros and definitions for device table entries */
+#define DEV_ENTRY_VALID 0x00
+#define DEV_ENTRY_TRANSLATION 0x01
+#define DEV_ENTRY_PPR 0x34
+#define DEV_ENTRY_IR 0x3d
+#define DEV_ENTRY_IW 0x3e
+#define DEV_ENTRY_NO_PAGE_FAULT 0x62
+#define DEV_ENTRY_EX 0x67
+#define DEV_ENTRY_SYSMGT1 0x68
+#define DEV_ENTRY_SYSMGT2 0x69
+#define DEV_ENTRY_IRQ_TBL_EN 0x80
+#define DEV_ENTRY_INIT_PASS 0xb8
+#define DEV_ENTRY_EINT_PASS 0xb9
+#define DEV_ENTRY_NMI_PASS 0xba
+#define DEV_ENTRY_LINT0_PASS 0xbe
+#define DEV_ENTRY_LINT1_PASS 0xbf
+#define DEV_ENTRY_MODE_MASK 0x07
+#define DEV_ENTRY_MODE_SHIFT 0x09
+
+#define MAX_DEV_TABLE_ENTRIES 0xffff
+
+/* constants to configure the command buffer */
+#define CMD_BUFFER_SIZE 8192
+#define CMD_BUFFER_UNINITIALIZED 1
+#define CMD_BUFFER_ENTRIES 512
+#define MMIO_CMD_SIZE_SHIFT 56
+#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
+
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE 8192 /* 512 entries */
+#define EVT_LEN_MASK (0x9ULL << 56)
+
+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES 512
+#define PPR_LOG_SIZE_SHIFT 56
+#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE 16
+#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x) ((x) & 0xffffULL)
+#define PPR_TAG(x) (((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x) (((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x) (((x) >> 42) & 0xfULL)
+#define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
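A worked example of the PASID reassembly above may help: the 20-bit PASID in a raw PPR log entry is carried in two pieces, the low 16 bits in bits 31:16 and the upper 4 bits in bits 45:42, and PPR_PASID() stitches them back together. Stand-alone sketch (illustrative only, not part of the driver):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t raw = 0;

	raw |= 0x1234ULL << 16;	/* PASID[15:0]  -> bits 31:16 */
	raw |= 0xaULL << 42;	/* PASID[19:16] -> bits 45:42 */

	uint64_t pasid1 = (raw >> 16) & 0xffffULL;	/* PPR_PASID1() */
	uint64_t pasid2 = (raw >> 42) & 0xfULL;		/* PPR_PASID2() */

	assert(((pasid2 << 16) | pasid1) == 0xa1234);	/* PPR_PASID()  */
	return 0;
}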
+
+#define PPR_REQ_FAULT 0x01
+
+/* Constants for GA Log handling */
+#define GA_LOG_ENTRIES 512
+#define GA_LOG_SIZE_SHIFT 56
+#define GA_LOG_SIZE_512 (0x8ULL << GA_LOG_SIZE_SHIFT)
+#define GA_ENTRY_SIZE 8
+#define GA_LOG_SIZE (GA_ENTRY_SIZE * GA_LOG_ENTRIES)
+
+#define GA_TAG(x) (u32)(x & 0xffffffffULL)
+#define GA_DEVID(x) (u16)(((x) >> 32) & 0xffffULL)
+#define GA_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
+
+#define GA_GUEST_NR 0x1
+
+#define IOMMU_IN_ADDR_BIT_SIZE 52
+#define IOMMU_OUT_ADDR_BIT_SIZE 52
+
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * 512 GB pages are not supported due to a hardware bug.
+ */
+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+/* 4K, 2MB, 1G page sizes are supported */
+#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
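To make the AMD_IOMMU_PGSIZES bitmap above concrete: bit n set means a page size of 2^n bytes is offered to the IOMMU core, ~0xFFF sets every bit from 12 (4 KB) upward, and ~(2ULL << 38) clears bit 39, i.e. the 512 GB size. A stand-alone sketch (illustrative only, not part of the driver):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t pgsizes = (~0xfffULL) & ~(2ULL << 38);

	assert(pgsizes & (1ULL << 12));		/* 4K supported   */
	assert(pgsizes & (1ULL << 21));		/* 2M supported   */
	assert(pgsizes & (1ULL << 30));		/* 1G supported   */
	assert(!(pgsizes & (1ULL << 39)));	/* 512G excluded  */
	return 0;
}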
+
+/* Bit value definition for dte irq remapping fields*/
+#define DTE_IRQ_PHYS_ADDR_MASK GENMASK_ULL(51, 6)
+#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
+#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
+#define DTE_IRQ_REMAP_ENABLE 1ULL
+
+/*
+ * AMD IOMMU hardware only supports 512 IRTEs despite
+ * the architectural limit of 2048 entries.
+ */
+#define DTE_INTTAB_ALIGNMENT 128
+#define DTE_INTTABLEN_VALUE 9ULL
+#define DTE_INTTABLEN (DTE_INTTABLEN_VALUE << 1)
+#define DTE_INTTABLEN_MASK (0xfULL << 1)
+#define MAX_IRQS_PER_TABLE (1 << DTE_INTTABLEN_VALUE)
+
+#define PAGE_MODE_NONE 0x00
+#define PAGE_MODE_1_LEVEL 0x01
+#define PAGE_MODE_2_LEVEL 0x02
+#define PAGE_MODE_3_LEVEL 0x03
+#define PAGE_MODE_4_LEVEL 0x04
+#define PAGE_MODE_5_LEVEL 0x05
+#define PAGE_MODE_6_LEVEL 0x06
+#define PAGE_MODE_7_LEVEL 0x07
+
+#define GUEST_PGTABLE_4_LEVEL 0x00
+#define GUEST_PGTABLE_5_LEVEL 0x01
+
+#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
+#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
+ ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
+ (0xffffffffffffffffULL))
+#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
+#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
+#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
+ IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)
+
+#define PM_MAP_4k 0
+#define PM_ADDR_MASK 0x000ffffffffff000ULL
+#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
+ (~((1ULL << (12 + ((lvl) * 9))) - 1)))
+#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
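The PM_LEVEL_*() macros above decompose an IO virtual address into 9-bit table indices, one per page-table level, starting at bit 12. A stand-alone sketch of PM_LEVEL_INDEX() (illustrative only, not part of the driver):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t iova = (3ULL << 30) | (5ULL << 21) | (7ULL << 12);

	assert(((iova >> 12) & 0x1ff) == 7);	/* PM_LEVEL_INDEX(0, iova) */
	assert(((iova >> 21) & 0x1ff) == 5);	/* PM_LEVEL_INDEX(1, iova) */
	assert(((iova >> 30) & 0x1ff) == 3);	/* PM_LEVEL_INDEX(2, iova) */
	return 0;
}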
+
+/*
+ * Returns the page table level to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_LEVEL(pagesize) \
+ ((__ffs(pagesize) - 12) / 9)
+/*
+ * Returns the number of ptes to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_PTE_COUNT(pagesize) \
+ (1ULL << ((__ffs(pagesize) - 12) % 9))
+
+/*
+ * Aligns a given io-virtual address to a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_ALIGN(address, pagesize) \
+ ((address) & ~((pagesize) - 1))
+/*
+ * Creates an IOMMU PTE for an address and a given pagesize
+ * The PTE has no permission bits set
+ * Pagesize is expected to be a power-of-two larger than 4096
+ */
+#define PAGE_SIZE_PTE(address, pagesize) \
+ (((address) | ((pagesize) - 1)) & \
+ (~(pagesize >> 1)) & PM_ADDR_MASK)
+
+/*
+ * Takes a PTE value with mode=0x07 and returns the page size it maps
+ */
+#define PTE_PAGE_SIZE(pte) \
+ (1ULL << (1 + ffz(((pte) | 0xfffULL))))
+
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level) \
+ (1ULL << (12 + (9 * (level))))
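Worked examples for the page-size macros above, with __builtin_ctzll() standing in for the kernel's __ffs() (both return the index of the lowest set bit for a power of two). Illustrative only, not part of the driver:

#include <stdint.h>
#include <assert.h>

static unsigned int level_for(uint64_t pagesize)	/* PAGE_SIZE_LEVEL()     */
{
	return (__builtin_ctzll(pagesize) - 12) / 9;
}

static uint64_t pte_count_for(uint64_t pagesize)	/* PAGE_SIZE_PTE_COUNT() */
{
	return 1ULL << ((__builtin_ctzll(pagesize) - 12) % 9);
}

int main(void)
{
	assert(level_for(1ULL << 21) == 1);		/* 2M pages live at level 1   */
	assert(level_for(1ULL << 30) == 2);		/* 1G pages live at level 2   */
	assert(pte_count_for(1ULL << 15) == 8);		/* a 32K mapping needs 8 PTEs */
	assert((1ULL << (12 + 9 * 1)) == (1ULL << 21));	/* PTE_LEVEL_PAGE_SIZE(1)     */
	return 0;
}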
+
+/*
+ * Bit value definition for I/O PTE fields
+ */
+#define IOMMU_PTE_PR BIT_ULL(0)
+#define IOMMU_PTE_U BIT_ULL(59)
+#define IOMMU_PTE_FC BIT_ULL(60)
+#define IOMMU_PTE_IR BIT_ULL(61)
+#define IOMMU_PTE_IW BIT_ULL(62)
+
+/*
+ * Bit value definition for DTE fields
+ */
+#define DTE_FLAG_V BIT_ULL(0)
+#define DTE_FLAG_TV BIT_ULL(1)
+#define DTE_FLAG_GIOV BIT_ULL(54)
+#define DTE_FLAG_GV BIT_ULL(55)
+#define DTE_GLX_SHIFT (56)
+#define DTE_GLX_MASK (3)
+#define DTE_FLAG_IR BIT_ULL(61)
+#define DTE_FLAG_IW BIT_ULL(62)
+
+#define DTE_FLAG_IOTLB BIT_ULL(32)
+#define DTE_FLAG_MASK (0x3ffULL << 32)
+#define DEV_DOMID_MASK 0xffffULL
+
+#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
+
+#define DTE_GCR3_INDEX_A 0
+#define DTE_GCR3_INDEX_B 1
+#define DTE_GCR3_INDEX_C 1
+
+#define DTE_GCR3_SHIFT_A 58
+#define DTE_GCR3_SHIFT_B 16
+#define DTE_GCR3_SHIFT_C 43
+
+#define DTE_GPT_LEVEL_SHIFT 54
+
+#define GCR3_VALID 0x01ULL
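The DTE_GCR3_VAL_*() macros above scatter a (4 KB aligned) guest CR3 table pointer over the DTE in three pieces: A carries address bits 14:12, B bits 30:15 and C bits 51:31. A stand-alone sketch showing that the three pieces reassemble to the original pointer (illustrative only, not part of the driver):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t gcr3 = 0x123456789000ULL;		/* 4K aligned example */

	uint64_t a = (gcr3 >> 12) & 0x00007ULL;		/* DTE_GCR3_VAL_A() */
	uint64_t b = (gcr3 >> 15) & 0x0ffffULL;		/* DTE_GCR3_VAL_B() */
	uint64_t c = (gcr3 >> 31) & 0x1fffffULL;	/* DTE_GCR3_VAL_C() */

	assert(((a << 12) | (b << 15) | (c << 31)) == gcr3);
	return 0;
}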
+
+#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
+#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
+#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
+#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
+
+#define IOMMU_PROT_MASK 0x03
+#define IOMMU_PROT_IR 0x01
+#define IOMMU_PROT_IW 0x02
+
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
+
+/* IOMMU capabilities */
+#define IOMMU_CAP_IOTLB 24
+#define IOMMU_CAP_NPCACHE 26
+#define IOMMU_CAP_EFR 27
+
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET 36
+#define IOMMU_IVINFO_EFRSUP BIT(0)
+#define IOMMU_IVINFO_DMA_REMAP BIT(1)
+
+/* IOMMU Feature Reporting Field (for IVHD type 10h) */
+#define IOMMU_FEAT_GASUP_SHIFT 6
+
+/* IOMMU Extended Feature Register (EFR) */
+#define IOMMU_EFR_XTSUP_SHIFT 2
+#define IOMMU_EFR_GASUP_SHIFT 7
+#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT 46
+
+#define MAX_DOMAIN_ID 65536
+
+/* Protection domain flags */
+#define PD_DMA_OPS_MASK BIT(0) /* domain used for dma_ops */
+#define PD_DEFAULT_MASK BIT(1) /* domain is a default dma_ops
+ domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK BIT(2) /* domain has no page
+ translation */
+#define PD_IOMMUV2_MASK BIT(3) /* domain has gcr3 table */
+#define PD_GIOV_MASK BIT(4) /* domain enable GIOV support */
+
+extern bool amd_iommu_dump;
+#define DUMP_printk(format, arg...) \
+ do { \
+ if (amd_iommu_dump) \
+ pr_info("AMD-Vi: " format, ## arg); \
+ } while(0);
+
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+/* Only true if all IOMMUs support device IOTLBs */
+extern bool amd_iommu_iotlb_sup;
+
+struct irq_remap_table {
+ raw_spinlock_t lock;
+ unsigned min_index;
+ u32 *table;
+};
+
+/* Interrupt remapping feature used? */
+extern bool amd_iommu_irq_remap;
+
+extern const struct iommu_ops amd_iommu_ops;
+
+/* IVRS indicates that pre-boot remapping was enabled */
+extern bool amdr_ivrs_remap_support;
+
+/* kmem_cache to get tables with 128 byte alignment */
+extern struct kmem_cache *amd_iommu_irq_cache;
+
+#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
+#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
+#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
+ ((devid) & 0xffff))
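A quick round-trip through the SBDF helpers above: the segment goes into the upper 16 bits and the 16-bit device id (bus/devfn) into the lower 16 bits. Stand-alone sketch (illustrative only; the example segment and devid are made up):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t seg = 0x0001;
	uint32_t devid = (0x3a << 8) | (0x00 << 3) | 0x2;	/* bus 0x3a, dev 0, fn 2 */
	uint32_t sbdf = ((seg & 0xffff) << 16) | (devid & 0xffff); /* PCI_SEG_DEVID_TO_SBDF() */

	assert(((sbdf >> 16) & 0xffff) == seg);		/* PCI_SBDF_TO_SEGID() */
	assert((sbdf & 0xffff) == devid);		/* PCI_SBDF_TO_DEVID() */
	return 0;
}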
+
+/* Make iterating over all PCI segments easier */
+#define for_each_pci_segment(pci_seg) \
+ list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
+#define for_each_pci_segment_safe(pci_seg, next) \
+ list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
+/*
+ * Make iterating over all IOMMUs easier
+ */
+#define for_each_iommu(iommu) \
+ list_for_each_entry((iommu), &amd_iommu_list, list)
+#define for_each_iommu_safe(iommu, next) \
+ list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+
+#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
+#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
+#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
+#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
+#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
+#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+ u64 address; /* IO virtual address of the fault */
+ u32 pasid; /* Address space identifier */
+ u32 sbdf; /* Originating PCI device id */
+ u16 tag; /* PPR tag */
+ u16 flags; /* Fault flags */
+
+};
+
+
+struct amd_iommu;
+struct iommu_domain;
+struct irq_domain;
+struct amd_irte_ops;
+
+#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
+
+#define io_pgtable_to_data(x) \
+ container_of((x), struct amd_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+#define io_pgtable_ops_to_domain(x) \
+ container_of(io_pgtable_ops_to_data(x), \
+ struct protection_domain, iop)
+
+#define io_pgtable_cfg_to_data(x) \
+ container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+
+struct amd_io_pgtable {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable iop;
+ int mode;
+ u64 *root;
+ atomic64_t pt_root; /* pgtable root and pgtable mode */
+ u64 *pgd; /* v2 pgtable pgd pointer */
+};
+
+/*
+ * This structure contains generic data for IOMMU protection domains
+ * independent of their use.
+ */
+struct protection_domain {
+ struct list_head dev_list; /* List of all devices in this domain */
+ struct iommu_domain domain; /* generic domain handle used by
+ iommu core code */
+ struct amd_io_pgtable iop;
+ spinlock_t lock; /* mostly used to lock the page table*/
+ u16 id; /* the domain id written to the device table */
+ int glx; /* Number of levels for GCR3 table */
+ int nid; /* Node ID */
+ u64 *gcr3_tbl; /* Guest CR3 table */
+ unsigned long flags; /* flags to find out type of domain */
+ unsigned dev_cnt; /* devices assigned to this domain */
+ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+};
+
+/*
+ * This structure contains information about one PCI segment in the system.
+ */
+struct amd_iommu_pci_seg {
+ /* List with all PCI segments in the system */
+ struct list_head list;
+
+ /* List of all available dev_data structures */
+ struct llist_head dev_data_list;
+
+ /* PCI segment number */
+ u16 id;
+
+ /* Largest PCI device id we expect translation requests for */
+ u16 last_bdf;
+
+ /* Size of the device table */
+ u32 dev_table_size;
+
+ /* Size of the alias table */
+ u32 alias_table_size;
+
+ /* Size of the rlookup table */
+ u32 rlookup_table_size;
+
+ /*
+ * device table virtual address
+ *
+ * Pointer to the per PCI segment device table.
+ * It is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
+ struct dev_table_entry *dev_table;
+
+ /*
+ * The rlookup iommu table is used to find the IOMMU which is
+ * responsible for a specific device. It is indexed by the PCI
+ * device id.
+ */
+ struct amd_iommu **rlookup_table;
+
+ /*
+ * This table is used to find the irq remapping table for a given
+ * device id quickly.
+ */
+ struct irq_remap_table **irq_lookup_table;
+
+ /*
+ * Pointer to a device table to which the contents of the old device
+ * table will be copied. It is only used in the kdump kernel.
+ */
+ struct dev_table_entry *old_dev_tbl_cpy;
+
+ /*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
+ u16 *alias_table;
+
+ /*
+ * A list of required unity mappings we find in ACPI. It is not locked
+ * because at runtime it is only read. It is created at ACPI table
+ * parsing time.
+ */
+ struct list_head unity_map;
+};
+
+/*
+ * Structure where we save information about one hardware AMD IOMMU in the
+ * system.
+ */
+struct amd_iommu {
+ struct list_head list;
+
+ /* Index within the IOMMU array */
+ int index;
+
+ /* locks the accesses to the hardware */
+ raw_spinlock_t lock;
+
+ /* Pointer to PCI device of this IOMMU */
+ struct pci_dev *dev;
+
+ /* Cache pdev to root device for resume quirks */
+ struct pci_dev *root_pdev;
+
+ /* physical address of MMIO space */
+ u64 mmio_phys;
+
+ /* physical end address of MMIO space */
+ u64 mmio_phys_end;
+
+ /* virtual address of MMIO space */
+ u8 __iomem *mmio_base;
+
+ /* capabilities of that IOMMU read from ACPI */
+ u32 cap;
+
+ /* flags read from acpi table */
+ u8 acpi_flags;
+
+ /* Extended features */
+ u64 features;
+
+ /* Extended features 2 */
+ u64 features2;
+
+ /* IOMMUv2 */
+ bool is_iommu_v2;
+
+ /* PCI device id of the IOMMU device */
+ u16 devid;
+
+ /*
+ * Capability pointer. There could be more than one IOMMU per PCI
+ * device function if there are more than one AMD IOMMU capability
+ * pointers.
+ */
+ u16 cap_ptr;
+
+ /* pci domain of this IOMMU */
+ struct amd_iommu_pci_seg *pci_seg;
+
+ /* start of exclusion range of that IOMMU */
+ u64 exclusion_start;
+ /* length of exclusion range of that IOMMU */
+ u64 exclusion_length;
+
+ /* command buffer virtual address */
+ u8 *cmd_buf;
+ u32 cmd_buf_head;
+ u32 cmd_buf_tail;
+
+ /* event buffer virtual address */
+ u8 *evt_buf;
+
+ /* Name for event log interrupt */
+ unsigned char evt_irq_name[16];
+
+ /* Base of the PPR log, if present */
+ u8 *ppr_log;
+
+ /* Name for PPR log interrupt */
+ unsigned char ppr_irq_name[16];
+
+ /* Base of the GA log, if present */
+ u8 *ga_log;
+
+ /* Name for GA log interrupt */
+ unsigned char ga_irq_name[16];
+
+ /* Tail of the GA log, if present */
+ u8 *ga_log_tail;
+
+ /* true if interrupts for this IOMMU are already enabled */
+ bool int_enabled;
+
+ /* if set, we need to send a completion wait command */
+ bool need_sync;
+
+ /* true if IRTE caching is disabled */
+ bool irtcachedis_enabled;
+
+ /* Handle for IOMMU core code */
+ struct iommu_device iommu;
+
+ /*
+ * We can't rely on the BIOS to restore all values on reinit, so we
+ * need to stash them
+ */
+
+ /* The iommu BAR */
+ u32 stored_addr_lo;
+ u32 stored_addr_hi;
+
+ /*
+ * Each iommu has 6 l1s, each of which is documented as having 0x12
+ * registers
+ */
+ u32 stored_l1[6][0x12];
+
+ /* The l2 indirect registers */
+ u32 stored_l2[0x83];
+
+ /* The maximum PC banks and counters/bank (PCSup=1) */
+ u8 max_banks;
+ u8 max_counters;
+#ifdef CONFIG_IRQ_REMAP
+ struct irq_domain *ir_domain;
+
+ struct amd_irte_ops *irte_ops;
+#endif
+
+ u32 flags;
+ volatile u64 *cmd_sem;
+ atomic64_t cmd_sem_val;
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+ /* DebugFS Info */
+ struct dentry *debugfs;
+#endif
+};
+
+static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
+{
+ struct iommu_device *iommu = dev_to_iommu_device(dev);
+
+ return container_of(iommu, struct amd_iommu, iommu);
+}
+
+#define ACPIHID_UID_LEN 256
+#define ACPIHID_HID_LEN 9
+
+struct acpihid_map_entry {
+ struct list_head list;
+ u8 uid[ACPIHID_UID_LEN];
+ u8 hid[ACPIHID_HID_LEN];
+ u32 devid;
+ u32 root_devid;
+ bool cmd_line;
+ struct iommu_group *group;
+};
+
+struct devid_map {
+ struct list_head list;
+ u8 id;
+ u32 devid;
+ bool cmd_line;
+};
+
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+ /* Protect against attach/detach races */
+ spinlock_t lock;
+
+ struct list_head list; /* For domain->dev_list */
+ struct llist_node dev_data_list; /* For global dev_data_list */
+ struct protection_domain *domain; /* Domain the device is bound to */
+ struct device *dev;
+ u16 devid; /* PCI Device ID */
+ bool iommu_v2; /* Device can make use of IOMMUv2 */
+ struct {
+ bool enabled;
+ int qdep;
+ } ats; /* ATS state */
+ bool pri_tlp; /* PASID TLB required for
+ PPR completions */
+ bool use_vapic; /* Enable device to use vapic mode */
+ bool defer_attach;
+
+ struct ratelimit_state rs; /* Ratelimit IOPF messages */
+};
+
+/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
+extern struct list_head ioapic_map;
+extern struct list_head hpet_map;
+extern struct list_head acpihid_map;
+
+/*
+ * List with all PCI segments in the system. This list is not locked because
+ * it is only written at driver initialization time
+ */
+extern struct list_head amd_iommu_pci_seg_list;
+
+/*
+ * List with all IOMMUs in the system. This list is not locked because it is
+ * only written and read at driver initialization or suspend time
+ */
+extern struct list_head amd_iommu_list;
+
+/*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/*
+ * Structure defining one entry in the device table
+ */
+struct dev_table_entry {
+ u64 data[4];
+};
+
+/*
+ * One entry for unity mappings parsed out of the ACPI table.
+ */
+struct unity_map_entry {
+ struct list_head list;
+
+ /* starting device id this entry is used for (including) */
+ u16 devid_start;
+ /* end device id this entry is used for (including) */
+ u16 devid_end;
+
+ /* start address to unity map (including) */
+ u64 address_start;
+ /* end address to unity map (including) */
+ u64 address_end;
+
+ /* required protection */
+ int prot;
+};
+
+/*
+ * Data structures for device handling
+ */
+
+/* size of the dma_ops aperture as power of 2 */
+extern unsigned amd_iommu_aperture_order;
+
+/* allocation bitmap for domain ids */
+extern unsigned long *amd_iommu_pd_alloc_bitmap;
+
+/* Smallest max PASID supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasid;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Max levels of glxval supported */
+extern int amd_iommu_max_glx_val;
+
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+void iommu_flush_all_caches(struct amd_iommu *iommu);
+
+static inline int get_ioapic_devid(int id)
+{
+ struct devid_map *entry;
+
+ list_for_each_entry(entry, &ioapic_map, list) {
+ if (entry->id == id)
+ return entry->devid;
+ }
+
+ return -EINVAL;
+}
+
+static inline int get_hpet_devid(int id)
+{
+ struct devid_map *entry;
+
+ list_for_each_entry(entry, &hpet_map, list) {
+ if (entry->id == id)
+ return entry->devid;
+ }
+
+ return -EINVAL;
+}
+
+enum amd_iommu_intr_mode_type {
+ AMD_IOMMU_GUEST_IR_LEGACY,
+
+ /* This mode is not visible to users. It is used when
+ * we cannot fully enable vAPIC and fall back to supporting only
+ * legacy interrupt remapping via 128-bit IRTEs.
+ */
+ AMD_IOMMU_GUEST_IR_LEGACY_GA,
+ AMD_IOMMU_GUEST_IR_VAPIC,
+};
+
+#define AMD_IOMMU_GUEST_IR_GA(x) (x == AMD_IOMMU_GUEST_IR_VAPIC || \
+ x == AMD_IOMMU_GUEST_IR_LEGACY_GA)
+
+#define AMD_IOMMU_GUEST_IR_VAPIC(x) (x == AMD_IOMMU_GUEST_IR_VAPIC)
+
+union irte {
+ u32 val;
+ struct {
+ u32 valid : 1,
+ no_fault : 1,
+ int_type : 3,
+ rq_eoi : 1,
+ dm : 1,
+ rsvd_1 : 1,
+ destination : 8,
+ vector : 8,
+ rsvd_2 : 8;
+ } fields;
+};
+
+#define APICID_TO_IRTE_DEST_LO(x) (x & 0xffffff)
+#define APICID_TO_IRTE_DEST_HI(x) ((x >> 24) & 0xff)
+
+union irte_ga_lo {
+ u64 val;
+
+ /* For int remapping */
+ struct {
+ u64 valid : 1,
+ no_fault : 1,
+ /* ------ */
+ int_type : 3,
+ rq_eoi : 1,
+ dm : 1,
+ /* ------ */
+ guest_mode : 1,
+ destination : 24,
+ ga_tag : 32;
+ } fields_remap;
+
+ /* For guest vAPIC */
+ struct {
+ u64 valid : 1,
+ no_fault : 1,
+ /* ------ */
+ ga_log_intr : 1,
+ rsvd1 : 3,
+ is_run : 1,
+ /* ------ */
+ guest_mode : 1,
+ destination : 24,
+ ga_tag : 32;
+ } fields_vapic;
+};
+
+union irte_ga_hi {
+ u64 val;
+ struct {
+ u64 vector : 8,
+ rsvd_1 : 4,
+ ga_root_ptr : 40,
+ rsvd_2 : 4,
+ destination : 8;
+ } fields;
+};
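The APICID_TO_IRTE_DEST_LO/HI() macros above split a 32-bit x2APIC destination id across the 128-bit GA IRTE: 24 bits land in the low qword and the remaining 8 bits in the destination field of the high qword. Stand-alone sketch (illustrative only, not part of the driver):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t apicid = 0x01020304;

	uint32_t dest_lo = apicid & 0xffffff;		/* APICID_TO_IRTE_DEST_LO() */
	uint32_t dest_hi = (apicid >> 24) & 0xff;	/* APICID_TO_IRTE_DEST_HI() */

	assert(((dest_hi << 24) | dest_lo) == apicid);
	return 0;
}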
+
+struct irte_ga {
+ union {
+ struct {
+ union irte_ga_lo lo;
+ union irte_ga_hi hi;
+ };
+ u128 irte;
+ };
+};
+
+struct irq_2_irte {
+ u16 devid; /* Device ID for IRTE table */
+ u16 index; /* Index into IRTE table*/
+};
+
+struct amd_ir_data {
+ u32 cached_ga_tag;
+ struct amd_iommu *iommu;
+ struct irq_2_irte irq_2_irte;
+ struct msi_msg msi_entry;
+ void *entry; /* Pointer to union irte or struct irte_ga */
+
+ /*
+ * Store information for activating/deactivating
+ * guest virtual APIC mode at runtime.
+ */
+ struct irq_cfg *cfg;
+ int ga_vector;
+ u64 ga_root_ptr;
+ u32 ga_tag;
+};
+
+struct amd_irte_ops {
+ void (*prepare)(void *, u32, bool, u8, u32, int);
+ void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
+ void *(*get)(struct irq_remap_table *, int);
+ void (*set_allocated)(struct irq_remap_table *, int);
+ bool (*is_allocated)(struct irq_remap_table *, int);
+ void (*clear_allocated)(struct irq_remap_table *, int);
+};
+
+#ifdef CONFIG_IRQ_REMAP
+extern struct amd_irte_ops irte_32_ops;
+extern struct amd_irte_ops irte_128_ops;
+#endif
+
+#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/drivers/iommu/amd/debugfs.c b/drivers/iommu/amd/debugfs.c
new file mode 100644
index 0000000000..545372fcc7
--- /dev/null
+++ b/drivers/iommu/amd/debugfs.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD IOMMU driver
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+
+#include "amd_iommu.h"
+
+static struct dentry *amd_iommu_debugfs;
+static DEFINE_MUTEX(amd_iommu_debugfs_lock);
+
+#define MAX_NAME_LEN 20
+
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
+{
+ char name[MAX_NAME_LEN + 1];
+
+ mutex_lock(&amd_iommu_debugfs_lock);
+ if (!amd_iommu_debugfs)
+ amd_iommu_debugfs = debugfs_create_dir("amd",
+ iommu_debugfs_dir);
+ mutex_unlock(&amd_iommu_debugfs_lock);
+
+ snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
+ iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+}
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
new file mode 100644
index 0000000000..45efb7e5d7
--- /dev/null
+++ b/drivers/iommu/amd/init.c
@@ -0,0 +1,3841 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ * Leo Duran <leo.duran@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/irq.h>
+#include <linux/amd-iommu.h>
+#include <linux/export.h>
+#include <linux/kmemleak.h>
+#include <linux/cc_platform.h>
+#include <linux/iopoll.h>
+#include <asm/pci-direct.h>
+#include <asm/iommu.h>
+#include <asm/apic.h>
+#include <asm/gart.h>
+#include <asm/x86_init.h>
+#include <asm/io_apic.h>
+#include <asm/irq_remapping.h>
+#include <asm/set_memory.h>
+
+#include <linux/crash_dump.h>
+
+#include "amd_iommu.h"
+#include "../irq_remapping.h"
+
+/*
+ * definitions for the ACPI scanning code
+ */
+#define IVRS_HEADER_LENGTH 48
+
+#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
+#define ACPI_IVMD_TYPE_ALL 0x20
+#define ACPI_IVMD_TYPE 0x21
+#define ACPI_IVMD_TYPE_RANGE 0x22
+
+#define IVHD_DEV_ALL 0x01
+#define IVHD_DEV_SELECT 0x02
+#define IVHD_DEV_SELECT_RANGE_START 0x03
+#define IVHD_DEV_RANGE_END 0x04
+#define IVHD_DEV_ALIAS 0x42
+#define IVHD_DEV_ALIAS_RANGE 0x43
+#define IVHD_DEV_EXT_SELECT 0x46
+#define IVHD_DEV_EXT_SELECT_RANGE 0x47
+#define IVHD_DEV_SPECIAL 0x48
+#define IVHD_DEV_ACPI_HID 0xf0
+
+#define UID_NOT_PRESENT 0
+#define UID_IS_INTEGER 1
+#define UID_IS_CHARACTER 2
+
+#define IVHD_SPECIAL_IOAPIC 1
+#define IVHD_SPECIAL_HPET 2
+
+#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
+#define IVHD_FLAG_PASSPW_EN_MASK 0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
+#define IVHD_FLAG_ISOC_EN_MASK 0x08
+
+#define IVMD_FLAG_EXCL_RANGE 0x08
+#define IVMD_FLAG_IW 0x04
+#define IVMD_FLAG_IR 0x02
+#define IVMD_FLAG_UNITY_MAP 0x01
+
+#define ACPI_DEVFLAG_INITPASS 0x01
+#define ACPI_DEVFLAG_EXTINT 0x02
+#define ACPI_DEVFLAG_NMI 0x04
+#define ACPI_DEVFLAG_SYSMGT1 0x10
+#define ACPI_DEVFLAG_SYSMGT2 0x20
+#define ACPI_DEVFLAG_LINT0 0x40
+#define ACPI_DEVFLAG_LINT1 0x80
+#define ACPI_DEVFLAG_ATSDIS 0x10000000
+
+#define LOOP_TIMEOUT 2000000
+
+#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
+ | ((dev & 0x1f) << 3) | (fn & 0x7))
+
+/*
+ * ACPI table definitions
+ *
+ * These data structures are laid over the table to parse the important values
+ * out of it.
+ */
+
+/*
+ * Structure describing one IOMMU in the ACPI table. Typically followed by one
+ * or more ivhd_entry structures.
+ */
+struct ivhd_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+ u16 devid;
+ u16 cap_ptr;
+ u64 mmio_phys;
+ u16 pci_seg;
+ u16 info;
+ u32 efr_attr;
+
+ /* Following only valid on IVHD type 11h and 40h */
+ u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
+ u64 efr_reg2;
+} __attribute__((packed));
+
+/*
+ * A device entry describing which devices a specific IOMMU translates and
+ * which requestor ids they use.
+ */
+struct ivhd_entry {
+ u8 type;
+ u16 devid;
+ u8 flags;
+ struct_group(ext_hid,
+ u32 ext;
+ u32 hidh;
+ );
+ u64 cid;
+ u8 uidf;
+ u8 uidl;
+ u8 uid;
+} __attribute__((packed));
+
+/*
+ * An AMD IOMMU memory definition structure. It defines things like exclusion
+ * ranges for devices and regions that should be unity mapped.
+ */
+struct ivmd_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+ u16 devid;
+ u16 aux;
+ u16 pci_seg;
+ u8 resv[6];
+ u64 range_start;
+ u64 range_length;
+} __attribute__((packed));
+
+bool amd_iommu_dump;
+bool amd_iommu_irq_remap __read_mostly;
+
+enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+/* Guest page table level */
+int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
+
+int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+
+static bool amd_iommu_detected;
+static bool amd_iommu_disabled __initdata;
+static bool amd_iommu_force_enable __initdata;
+static bool amd_iommu_irtcachedis;
+static int amd_iommu_target_ivhd_type;
+
+/* Global EFR and EFR2 registers */
+u64 amd_iommu_efr;
+u64 amd_iommu_efr2;
+
+/* Is SNP enabled on the system? */
+bool amd_iommu_snp_en;
+EXPORT_SYMBOL(amd_iommu_snp_en);
+
+LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
+LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
+ system */
+
+/* Array to assign indices to IOMMUs */
+struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+static int amd_iommus_present;
+
+/* Do IOMMUs have a non-present cache? */
+bool amd_iommu_np_cache __read_mostly;
+bool amd_iommu_iotlb_sup __read_mostly = true;
+
+u32 amd_iommu_max_pasid __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
+bool amdr_ivrs_remap_support __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
+ * to know which ones are already in use.
+ */
+unsigned long *amd_iommu_pd_alloc_bitmap;
+
+enum iommu_init_state {
+ IOMMU_START_STATE,
+ IOMMU_IVRS_DETECTED,
+ IOMMU_ACPI_FINISHED,
+ IOMMU_ENABLED,
+ IOMMU_PCI_INIT,
+ IOMMU_INTERRUPTS_EN,
+ IOMMU_INITIALIZED,
+ IOMMU_NOT_FOUND,
+ IOMMU_INIT_ERROR,
+ IOMMU_CMDLINE_DISABLED,
+};
+
+/* Early ioapic and hpet maps from kernel command line */
+#define EARLY_MAP_SIZE 4
+static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
+static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
+
+static int __initdata early_ioapic_map_size;
+static int __initdata early_hpet_map_size;
+static int __initdata early_acpihid_map_size;
+
+static bool __initdata cmdline_maps;
+
+static enum iommu_init_state init_state = IOMMU_START_STATE;
+
+static int amd_iommu_enable_interrupts(void);
+static int __init iommu_go_to_state(enum iommu_init_state state);
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
+
+static bool amd_iommu_pre_enabled = true;
+
+static u32 amd_iommu_ivinfo __initdata;
+
+bool translation_pre_enabled(struct amd_iommu *iommu)
+{
+ return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
+}
+
+static void clear_translation_pre_enabled(struct amd_iommu *iommu)
+{
+ iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
+}
+
+static void init_translation_status(struct amd_iommu *iommu)
+{
+ u64 ctrl;
+
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ if (ctrl & (1<<CONTROL_IOMMU_EN))
+ iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
+}
+
+static inline unsigned long tbl_size(int entry_size, int last_bdf)
+{
+ unsigned shift = PAGE_SHIFT +
+ get_order((last_bdf + 1) * entry_size);
+
+ return 1UL << shift;
+}
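tbl_size() above rounds the raw table size up to a power-of-two number of pages (PAGE_SIZE << order). For the full 64K device ids at DEV_TABLE_ENTRY_SIZE (32) bytes per entry that comes out to exactly 2 MB. A stand-alone sketch with a simplified get_order() stand-in (illustrative only, not part of the driver):

#include <stdint.h>
#include <assert.h>

#define SKETCH_PAGE_SHIFT 12

static unsigned int sketch_get_order(unsigned long size)	/* like get_order() */
{
	unsigned int order = 0;

	size = (size - 1) >> SKETCH_PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	int last_bdf = 0xffff, entry_size = 32;	/* DEV_TABLE_ENTRY_SIZE */
	unsigned long size = 1UL << (SKETCH_PAGE_SHIFT +
				     sketch_get_order((last_bdf + 1UL) * entry_size));

	assert(size == 2UL * 1024 * 1024);	/* 64K entries * 32 bytes = 2 MB */
	return 0;
}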
+
+int amd_iommu_get_num_iommus(void)
+{
+ return amd_iommus_present;
+}
+
+/*
+ * Iterate through all the IOMMUs to compute the common EFR
+ * masks among all IOMMUs and warn if any inconsistency is found.
+ */
+static void get_global_efr(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ u64 tmp = iommu->features;
+ u64 tmp2 = iommu->features2;
+
+ if (list_is_first(&iommu->list, &amd_iommu_list)) {
+ amd_iommu_efr = tmp;
+ amd_iommu_efr2 = tmp2;
+ continue;
+ }
+
+ if (amd_iommu_efr == tmp &&
+ amd_iommu_efr2 == tmp2)
+ continue;
+
+ pr_err(FW_BUG
+ "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
+ tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
+ iommu->index, iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
+ PCI_FUNC(iommu->devid));
+
+ amd_iommu_efr &= tmp;
+ amd_iommu_efr2 &= tmp2;
+ }
+
+ pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
+}
+
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ return !!(amd_iommu_efr & mask);
+}
+
+static inline int check_feature_gpt_level(void)
+{
+ return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
+}
+
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
+ iommu->features = h->efr_reg;
+ iommu->features2 = h->efr_reg2;
+ }
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
+ amdr_ivrs_remap_support = true;
+}
+
+/* Access to l1 and l2 indexed register spaces */
+
+static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
+{
+ u32 val;
+
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+ pci_read_config_dword(iommu->dev, 0xfc, &val);
+ return val;
+}
+
+static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
+{
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
+ pci_write_config_dword(iommu->dev, 0xfc, val);
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+}
+
+static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
+{
+ u32 val;
+
+ pci_write_config_dword(iommu->dev, 0xf0, address);
+ pci_read_config_dword(iommu->dev, 0xf4, &val);
+ return val;
+}
+
+static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
+{
+ pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
+ pci_write_config_dword(iommu->dev, 0xf4, val);
+}
+
+/****************************************************************************
+ *
+ * AMD IOMMU MMIO register space handling functions
+ *
+ * These functions are used to program the IOMMU device registers in
+ * MMIO space required by this driver.
+ *
+ ****************************************************************************/
+
+/*
+ * This function sets the exclusion range in the IOMMU. DMA accesses to the
+ * exclusion range are passed through untranslated.
+ */
+static void iommu_set_exclusion_range(struct amd_iommu *iommu)
+{
+ u64 start = iommu->exclusion_start & PAGE_MASK;
+ u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
+ u64 entry;
+
+ if (!iommu->exclusion_start)
+ return;
+
+ entry = start | MMIO_EXCL_ENABLE_MASK;
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
+ &entry, sizeof(entry));
+
+ entry = limit;
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
+ &entry, sizeof(entry));
+}
+
+static void iommu_set_cwwb_range(struct amd_iommu *iommu)
+{
+ u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ u64 entry = start & PM_ADDR_MASK;
+
+ if (!check_feature_on_all_iommus(FEATURE_SNP))
+ return;
+
+ /* Note:
+ * Re-purpose Exclusion base/limit registers for Completion wait
+ * write-back base/limit.
+ */
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
+ &entry, sizeof(entry));
+
+ /* Note:
+ * Default to 4 Kbytes, which can be specified by setting base
+ * address equal to the limit address.
+ */
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
+ &entry, sizeof(entry));
+}
+
+/* Programs the physical address of the device table into the IOMMU hardware */
+static void iommu_set_device_table(struct amd_iommu *iommu)
+{
+ u64 entry;
+ u32 dev_table_size = iommu->pci_seg->dev_table_size;
+ void *dev_table = (void *)get_dev_table(iommu);
+
+ BUG_ON(iommu->mmio_base == NULL);
+
+ entry = iommu_virt_to_phys(dev_table);
+ entry |= (dev_table_size >> 12) - 1;
+ memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
+ &entry, sizeof(entry));
+}
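The device table base register value built by iommu_set_device_table() above encodes the table size in its low bits as the number of 4 KB pages minus one, alongside the physical base address. A stand-alone sketch (illustrative only; the example base address is made up):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t dev_table_phys = 0x100000000ULL;	/* example, 4K aligned */
	uint32_t dev_table_size = 2 * 1024 * 1024;	/* 64K entries * 32 B  */
	uint64_t entry = dev_table_phys | ((dev_table_size >> 12) - 1);

	assert((entry & 0x1ffULL) == 0x1ff);		/* 512 pages - 1       */
	assert((entry & ~0xfffULL) == dev_table_phys);	/* base address intact */
	return 0;
}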
+
+/* Generic functions to enable/disable certain features of the IOMMU. */
+static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+{
+ u64 ctrl;
+
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl |= (1ULL << bit);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
+static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+{
+ u64 ctrl;
+
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~(1ULL << bit);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+ u64 ctrl;
+
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~CTRL_INV_TO_MASK;
+ ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
+/* Function to enable the hardware */
+static void iommu_enable(struct amd_iommu *iommu)
+{
+ iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
+}
+
+static void iommu_disable(struct amd_iommu *iommu)
+{
+ if (!iommu->mmio_base)
+ return;
+
+ /* Disable command buffer */
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+ /* Disable event logging and event interrupts */
+ iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+
+ /* Disable IOMMU GA_LOG */
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+ /* Disable IOMMU PPR logging */
+ iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
+ iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
+
+ /* Disable IOMMU hardware itself */
+ iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
+
+ /* Clear IRTE cache disabling bit */
+ iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
+}
+
+/*
+ * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
+ * the system has one.
+ */
+static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
+{
+ if (!request_mem_region(address, end, "amd_iommu")) {
+ pr_err("Can not reserve memory region %llx-%llx for mmio\n",
+ address, end);
+ pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
+ return NULL;
+ }
+
+ return (u8 __iomem *)ioremap(address, end);
+}
+
+static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
+{
+ if (iommu->mmio_base)
+ iounmap(iommu->mmio_base);
+ release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
+}
+
+static inline u32 get_ivhd_header_size(struct ivhd_header *h)
+{
+ u32 size = 0;
+
+ switch (h->type) {
+ case 0x10:
+ size = 24;
+ break;
+ case 0x11:
+ case 0x40:
+ size = 40;
+ break;
+ }
+ return size;
+}
+
+/****************************************************************************
+ *
+ * The functions below belong to the first pass of AMD IOMMU ACPI table
+ * parsing. In this pass we try to find out the highest device id this
+ * code has to handle. Based on this information, the size of the shared
+ * data structures is determined later.
+ *
+ ****************************************************************************/
+
+/*
+ * This function calculates the length of a given IVHD entry
+ */
+static inline int ivhd_entry_length(u8 *ivhd)
+{
+ u32 type = ((struct ivhd_entry *)ivhd)->type;
+
+ if (type < 0x80) {
+ return 0x04 << (*ivhd >> 6);
+ } else if (type == IVHD_DEV_ACPI_HID) {
+ /* For ACPI_HID, offset 21 is uid len */
+ return *((u8 *)ivhd + 21) + 22;
+ }
+ return 0;
+}
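For IVHD entry types below 0x80, the entry length is encoded in the top two bits of the type byte, which is what the expression 0x04 << (*ivhd >> 6) above computes: 4-byte entries for types below 0x40 and 8-byte entries for types 0x40-0x7f. Stand-alone sketch (illustrative only, not part of the driver):

#include <assert.h>

static int sketch_entry_length(unsigned char type)
{
	return 0x04 << (type >> 6);
}

int main(void)
{
	assert(sketch_entry_length(0x02) == 4);	/* IVHD_DEV_SELECT:  4-byte entry */
	assert(sketch_entry_length(0x42) == 8);	/* IVHD_DEV_ALIAS:   8-byte entry */
	assert(sketch_entry_length(0x48) == 8);	/* IVHD_DEV_SPECIAL: 8-byte entry */
	return 0;
}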
+
+/*
+ * After reading the highest device id from the IOMMU PCI capability header
+ * this function checks whether a higher device id is defined in the ACPI table.
+ */
+static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
+{
+ u8 *p = (void *)h, *end = (void *)h;
+ struct ivhd_entry *dev;
+ int last_devid = -EINVAL;
+
+ u32 ivhd_size = get_ivhd_header_size(h);
+
+ if (!ivhd_size) {
+ pr_err("Unsupported IVHD type %#x\n", h->type);
+ return -EINVAL;
+ }
+
+ p += ivhd_size;
+ end += h->length;
+
+ while (p < end) {
+ dev = (struct ivhd_entry *)p;
+ switch (dev->type) {
+ case IVHD_DEV_ALL:
+ /* Use maximum BDF value for DEV_ALL */
+ return 0xffff;
+ case IVHD_DEV_SELECT:
+ case IVHD_DEV_RANGE_END:
+ case IVHD_DEV_ALIAS:
+ case IVHD_DEV_EXT_SELECT:
+ /* all the above subfield types refer to device ids */
+ if (dev->devid > last_devid)
+ last_devid = dev->devid;
+ break;
+ default:
+ break;
+ }
+ p += ivhd_entry_length(p);
+ }
+
+ WARN_ON(p != end);
+
+ return last_devid;
+}
+
+static int __init check_ivrs_checksum(struct acpi_table_header *table)
+{
+ int i;
+ u8 checksum = 0, *p = (u8 *)table;
+
+ for (i = 0; i < table->length; ++i)
+ checksum += p[i];
+ if (checksum != 0) {
+ /* ACPI table corrupt */
+ pr_err(FW_BUG "IVRS invalid checksum\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Iterate over all IVHD entries in the ACPI table and find the highest device
+ * id which we need to handle. This is the first of three functions which parse
+ * the ACPI table. So we check the checksum here.
+ */
+static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
+{
+ u8 *p = (u8 *)table, *end = (u8 *)table;
+ struct ivhd_header *h;
+ int last_devid, last_bdf = 0;
+
+ p += IVRS_HEADER_LENGTH;
+
+ end += table->length;
+ while (p < end) {
+ h = (struct ivhd_header *)p;
+ if (h->pci_seg == pci_seg &&
+ h->type == amd_iommu_target_ivhd_type) {
+ last_devid = find_last_devid_from_ivhd(h);
+
+ if (last_devid < 0)
+ return -EINVAL;
+ if (last_devid > last_bdf)
+ last_bdf = last_devid;
+ }
+ p += h->length;
+ }
+ WARN_ON(p != end);
+
+ return last_bdf;
+}
+
+/****************************************************************************
+ *
+ * The following functions belong to the code path which parses the ACPI table
+ * the second time. In this ACPI parsing iteration we allocate IOMMU-specific
+ * data structures, initialize the per-PCI-segment device/alias/rlookup tables,
+ * and perform the basic hardware initialization.
+ *
+ ****************************************************************************/
+
+/* Allocate per PCI segment device table */
+static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
+ get_order(pci_seg->dev_table_size));
+ if (!pci_seg->dev_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = NULL;
+}
+
+/* Allocate per PCI segment IOMMU rlookup table. */
+static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->rlookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ if (pci_seg->rlookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->rlookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = NULL;
+}
+
+static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->irq_lookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ kmemleak_alloc(pci_seg->irq_lookup_table,
+ pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ if (pci_seg->irq_lookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ kmemleak_free(pci_seg->irq_lookup_table);
+ free_pages((unsigned long)pci_seg->irq_lookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->irq_lookup_table = NULL;
+}
+
+static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ int i;
+
+ pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(pci_seg->alias_table_size));
+ if (!pci_seg->alias_table)
+ return -ENOMEM;
+
+	/*
+	 * Let every alias entry point to itself initially
+	 */
+ for (i = 0; i <= pci_seg->last_bdf; ++i)
+ pci_seg->alias_table[i] = i;
+
+ return 0;
+}
+
+static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->alias_table,
+ get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = NULL;
+}
+
+/*
+ * Allocates the command buffer. This buffer is per AMD IOMMU. We can
+ * write commands to that buffer later and the IOMMU will execute them
+ * asynchronously
+ */
+static int __init alloc_command_buffer(struct amd_iommu *iommu)
+{
+ iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(CMD_BUFFER_SIZE));
+
+ return iommu->cmd_buf ? 0 : -ENOMEM;
+}
+
+/*
+ * The interrupt handler has processed all pending events and adjusted the
+ * head and tail pointers. Reset the overflow mask and restart logging.
+ */
+static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+ u8 cntrl_intr, u8 cntrl_log,
+ u32 status_run_mask, u32 status_overflow_mask)
+{
+ u32 status;
+
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & status_run_mask)
+ return;
+
+ pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
+
+ iommu_feature_disable(iommu, cntrl_log);
+ iommu_feature_disable(iommu, cntrl_intr);
+
+ writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ iommu_feature_enable(iommu, cntrl_intr);
+ iommu_feature_enable(iommu, cntrl_log);
+}
+
+/*
+ * This function restarts event logging in case the IOMMU experienced
+ * an event log buffer overflow.
+ */
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+{
+ amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
+ CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
+ MMIO_STATUS_EVT_OVERFLOW_MASK);
+}
+
+/*
+ * This function restarts GA logging in case the IOMMU experienced
+ * a GA log overflow.
+ */
+void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
+{
+ amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
+ CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
+ MMIO_STATUS_GALOG_OVERFLOW_MASK);
+}
+
+/*
+ * This function restarts PPR logging in case the IOMMU experienced
+ * a PPR log overflow.
+ */
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
+{
+ amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
+ CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
+ MMIO_STATUS_PPR_OVERFLOW_MASK);
+}
+
+/*
+ * This function resets the command buffer if the IOMMU stopped fetching
+ * commands from it.
+ */
+static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+ writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ iommu->cmd_buf_head = 0;
+ iommu->cmd_buf_tail = 0;
+
+ iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+}
+
+/*
+ * This function writes the command buffer address to the hardware and
+ * enables it.
+ */
+static void iommu_enable_command_buffer(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ BUG_ON(iommu->cmd_buf == NULL);
+
+ entry = iommu_virt_to_phys(iommu->cmd_buf);
+ entry |= MMIO_CMD_SIZE_512;
+
+ memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
+ &entry, sizeof(entry));
+
+ amd_iommu_reset_cmd_buffer(iommu);
+}
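+
+/*
+ * Illustrative note (a hedged sketch based on the definitions used above):
+ * the command buffer base register packs the buffer's physical address
+ * together with a power-of-two length encoding in its upper bits;
+ * MMIO_CMD_SIZE_512 selects 512 command entries of 16 bytes each, matching
+ * the 8 KiB CMD_BUFFER_SIZE allocated earlier.
+ */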
+
+/*
+ * This function disables the command buffer
+ */
+static void iommu_disable_command_buffer(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+}
+
+static void __init free_command_buffer(struct amd_iommu *iommu)
+{
+ free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+}
+
+static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+ gfp_t gfp, size_t size)
+{
+ int order = get_order(size);
+ void *buf = (void *)__get_free_pages(gfp, order);
+
+ if (buf &&
+ check_feature_on_all_iommus(FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, (1 << order))) {
+ free_pages((unsigned long)buf, order);
+ buf = NULL;
+ }
+
+ return buf;
+}
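+
+/*
+ * Illustrative example (not driver logic): for the 8 KiB event buffer below,
+ * the helper above computes order = 1 and, when FEATURE_SNP is reported on
+ * all IOMMUs, calls set_memory_4k() on the resulting 1 << 1 = 2 pages so the
+ * buffer is mapped with 4 KiB granularity in the kernel direct map.
+ */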
+
+/* Allocate the memory where the IOMMU will log its events */
+static int __init alloc_event_buffer(struct amd_iommu *iommu)
+{
+ iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ EVT_BUFFER_SIZE);
+
+ return iommu->evt_buf ? 0 : -ENOMEM;
+}
+
+static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ BUG_ON(iommu->evt_buf == NULL);
+
+ entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+/*
+ * This function disables the event log buffer
+ */
+static void iommu_disable_event_buffer(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+static void __init free_event_buffer(struct amd_iommu *iommu)
+{
+ free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+}
+
+/* Allocate the memory where the IOMMU will log peripheral page requests */
+static int __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+ iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ PPR_LOG_SIZE);
+
+ return iommu->ppr_log ? 0 : -ENOMEM;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_PPR_EN);
+
+ entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+ memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+ &entry, sizeof(entry));
+
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
+}
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+ free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void free_ga_log(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
+ free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
+#endif
+}
+
+#ifdef CONFIG_IRQ_REMAP
+static int iommu_ga_log_enable(struct amd_iommu *iommu)
+{
+ u32 status, i;
+ u64 entry;
+
+ if (!iommu->ga_log)
+ return -EINVAL;
+
+ entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+ &entry, sizeof(entry));
+ entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+ (BIT_ULL(52)-1)) & ~7ULL;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+ &entry, sizeof(entry));
+ writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_GAINT_EN);
+ iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+ break;
+ udelay(10);
+ }
+
+ if (WARN_ON(i >= LOOP_TIMEOUT))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iommu_init_ga_log(struct amd_iommu *iommu)
+{
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+ return 0;
+
+ iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(GA_LOG_SIZE));
+ if (!iommu->ga_log)
+ goto err_out;
+
+ iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(8));
+ if (!iommu->ga_log_tail)
+ goto err_out;
+
+ return 0;
+err_out:
+ free_ga_log(iommu);
+ return -EINVAL;
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+ iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
+
+ return iommu->cmd_sem ? 0 : -ENOMEM;
+}
+
+static void __init free_cwwb_sem(struct amd_iommu *iommu)
+{
+ if (iommu->cmd_sem)
+ free_page((unsigned long)iommu->cmd_sem);
+}
+
+static void iommu_enable_xt(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ /*
+ * XT mode (32-bit APIC destination ID) requires
+ * GA mode (128-bit IRTE support) as a prerequisite.
+ */
+ if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
+ amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ iommu_feature_enable(iommu, CONTROL_XT_EN);
+#endif /* CONFIG_IRQ_REMAP */
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+ if (!iommu_feature(iommu, FEATURE_GT))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
+/* sets a specific bit in the device table entry. */
+static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
+{
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
+
+ dev_table[devid].data[i] |= (1UL << _bit);
+}
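+
+/*
+ * Illustrative example (not driver logic, derived from the arithmetic
+ * above): the flat bit index is split into a 64-bit word index and a bit
+ * offset. For bit 97: i = (97 >> 6) & 0x03 = 1 and _bit = 97 & 0x3f = 33,
+ * so the helper sets bit 33 of dev_table[devid].data[1].
+ */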
+
+static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ return __set_dev_entry_bit(dev_table, devid, bit);
+}
+
+static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
+{
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
+
+ return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
+}
+
+static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ return __get_dev_entry_bit(dev_table, devid, bit);
+}
+
+static bool __copy_device_table(struct amd_iommu *iommu)
+{
+ u64 int_ctl, int_tab_len, entry = 0;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ struct dev_table_entry *old_devtb = NULL;
+ u32 lo, hi, devid, old_devtb_size;
+ phys_addr_t old_devtb_phys;
+ u16 dom_id, dte_v, irq_v;
+ gfp_t gfp_flag;
+ u64 tmp;
+
+	/* Each IOMMU uses a separate device table of the same size */
+ lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+ hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
+ entry = (((u64) hi) << 32) + lo;
+
+ old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
+ if (old_devtb_size != pci_seg->dev_table_size) {
+ pr_err("The device table size of IOMMU:%d is not expected!\n",
+ iommu->index);
+ return false;
+ }
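+
+	/*
+	 * Illustrative example (not part of the copy logic): the low 12 bits
+	 * of the base register encode the table size in 4 KiB units minus
+	 * one, so a value of 0x1ff means (0x1ff + 1) * 4 KiB = 2 MiB, enough
+	 * for 65536 device table entries of 32 bytes each.
+	 */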
+
+	/*
+	 * When SME is enabled in the first kernel, the entry includes the
+	 * memory encryption mask (sme_me_mask); it must be removed to obtain
+	 * the true physical address in the kdump kernel.
+	 */
+ old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
+
+ if (old_devtb_phys >= 0x100000000ULL) {
+ pr_err("The address of old device table is above 4G, not trustworthy!\n");
+ return false;
+ }
+ old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
+ ? (__force void *)ioremap_encrypted(old_devtb_phys,
+ pci_seg->dev_table_size)
+ : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
+
+ if (!old_devtb)
+ return false;
+
+ gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
+ pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
+ get_order(pci_seg->dev_table_size));
+ if (pci_seg->old_dev_tbl_cpy == NULL) {
+ pr_err("Failed to allocate memory for copying old device table!\n");
+ memunmap(old_devtb);
+ return false;
+ }
+
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
+ dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
+ dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
+
+ if (dte_v && dom_id) {
+ pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
+ pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
+ __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
+ /* If gcr3 table existed, mask it out */
+ if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
+ tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+ tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
+ tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
+ tmp |= DTE_FLAG_GV;
+ pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
+ }
+ }
+
+ irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
+ int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
+ int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
+ if (irq_v && (int_ctl || int_tab_len)) {
+ if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
+ (int_tab_len != DTE_INTTABLEN)) {
+ pr_err("Wrong old irq remapping flag: %#x\n", devid);
+ memunmap(old_devtb);
+ return false;
+ }
+
+ pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
+ }
+ }
+ memunmap(old_devtb);
+
+ return true;
+}
+
+static bool copy_device_table(void)
+{
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ if (!amd_iommu_pre_enabled)
+ return false;
+
+ pr_warn("Translation is already enabled - trying to copy translation structures\n");
+
+	/*
+	 * All IOMMUs within a PCI segment share a common device table.
+	 * Hence copy the device table only once per PCI segment.
+	 */
+ for_each_pci_segment(pci_seg) {
+ for_each_iommu(iommu) {
+ if (pci_seg->id != iommu->pci_seg->id)
+ continue;
+ if (!__copy_device_table(iommu))
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
+void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
+{
+ int sysmgt;
+
+ sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
+ (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+
+ if (sysmgt == 0x01)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
+}
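+
+/*
+ * Illustrative note (not driver logic, derived from the code above): the two
+ * SYSMGT bits are combined into a two-bit value with SYSMGT1 in bit 0 and
+ * SYSMGT2 in bit 1. Only the combination SYSMGT1=1/SYSMGT2=0 (sysmgt ==
+ * 0x01) triggers the erratum 63 workaround of additionally setting the IW
+ * bit for the device.
+ */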
+
+/*
+ * This function takes the device specific flags read from the ACPI
+ * table and sets up the device table entry with that information
+ */
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+ u16 devid, u32 flags, u32 ext_flags)
+{
+ if (flags & ACPI_DEVFLAG_INITPASS)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
+ if (flags & ACPI_DEVFLAG_EXTINT)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
+ if (flags & ACPI_DEVFLAG_NMI)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
+ if (flags & ACPI_DEVFLAG_SYSMGT1)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
+ if (flags & ACPI_DEVFLAG_SYSMGT2)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
+ if (flags & ACPI_DEVFLAG_LINT0)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
+ if (flags & ACPI_DEVFLAG_LINT1)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
+
+ amd_iommu_apply_erratum_63(iommu, devid);
+
+ amd_iommu_set_rlookup_table(iommu, devid);
+}
+
+int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
+{
+ struct devid_map *entry;
+ struct list_head *list;
+
+ if (type == IVHD_SPECIAL_IOAPIC)
+ list = &ioapic_map;
+ else if (type == IVHD_SPECIAL_HPET)
+ list = &hpet_map;
+ else
+ return -EINVAL;
+
+ list_for_each_entry(entry, list, list) {
+ if (!(entry->id == id && entry->cmd_line))
+ continue;
+
+ pr_info("Command-line override present for %s id %d - ignoring\n",
+ type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+
+ *devid = entry->devid;
+
+ return 0;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->id = id;
+ entry->devid = *devid;
+ entry->cmd_line = cmd_line;
+
+ list_add_tail(&entry->list, list);
+
+ return 0;
+}
+
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
+ bool cmd_line)
+{
+ struct acpihid_map_entry *entry;
+ struct list_head *list = &acpihid_map;
+
+ list_for_each_entry(entry, list, list) {
+ if (strcmp(entry->hid, hid) ||
+ (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
+ !entry->cmd_line)
+ continue;
+
+ pr_info("Command-line override for hid:%s uid:%s\n",
+ hid, uid);
+ *devid = entry->devid;
+ return 0;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->uid, uid, strlen(uid));
+ memcpy(entry->hid, hid, strlen(hid));
+ entry->devid = *devid;
+ entry->cmd_line = cmd_line;
+ entry->root_devid = (entry->devid & (~0x7));
+
+ pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
+ entry->cmd_line ? "cmd" : "ivrs",
+ entry->hid, entry->uid, entry->root_devid);
+
+ list_add_tail(&entry->list, list);
+ return 0;
+}
+
+static int __init add_early_maps(void)
+{
+ int i, ret;
+
+ for (i = 0; i < early_ioapic_map_size; ++i) {
+ ret = add_special_device(IVHD_SPECIAL_IOAPIC,
+ early_ioapic_map[i].id,
+ &early_ioapic_map[i].devid,
+ early_ioapic_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < early_hpet_map_size; ++i) {
+ ret = add_special_device(IVHD_SPECIAL_HPET,
+ early_hpet_map[i].id,
+ &early_hpet_map[i].devid,
+ early_hpet_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < early_acpihid_map_size; ++i) {
+ ret = add_acpi_hid_device(early_acpihid_map[i].hid,
+ early_acpihid_map[i].uid,
+ &early_acpihid_map[i].devid,
+ early_acpihid_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Takes a pointer to an AMD IOMMU entry in the ACPI table and
+ * initializes the hardware and our data structures with it.
+ */
+static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ u8 *p = (u8 *)h;
+ u8 *end = p, flags = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
+ u32 dev_i, ext_flags = 0;
+ bool alias = false;
+ struct ivhd_entry *e;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ u32 ivhd_size;
+ int ret;
+
+ ret = add_early_maps();
+ if (ret)
+ return ret;
+
+ amd_iommu_apply_ivrs_quirks();
+
+ /*
+ * First save the recommended feature enable bits from ACPI
+ */
+ iommu->acpi_flags = h->flags;
+
+ /*
+ * Done. Now parse the device entries
+ */
+ ivhd_size = get_ivhd_header_size(h);
+ if (!ivhd_size) {
+ pr_err("Unsupported IVHD type %#x\n", h->type);
+ return -EINVAL;
+ }
+
+ p += ivhd_size;
+
+ end += h->length;
+
+ while (p < end) {
+ e = (struct ivhd_entry *)p;
+ seg_id = pci_seg->id;
+
+ switch (e->type) {
+ case IVHD_DEV_ALL:
+
+ DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
+
+ for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
+ set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
+ break;
+ case IVHD_DEV_SELECT:
+
+ DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
+ "flags: %02x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags);
+
+ devid = e->devid;
+ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+ break;
+ case IVHD_DEV_SELECT_RANGE_START:
+
+ DUMP_printk(" DEV_SELECT_RANGE_START\t "
+ "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags);
+
+ devid_start = e->devid;
+ flags = e->flags;
+ ext_flags = 0;
+ alias = false;
+ break;
+ case IVHD_DEV_ALIAS:
+
+ DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
+ "flags: %02x devid_to: %02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags,
+ PCI_BUS_NUM(e->ext >> 8),
+ PCI_SLOT(e->ext >> 8),
+ PCI_FUNC(e->ext >> 8));
+
+ devid = e->devid;
+ devid_to = e->ext >> 8;
+ set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
+ set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
+ pci_seg->alias_table[devid] = devid_to;
+ break;
+ case IVHD_DEV_ALIAS_RANGE:
+
+ DUMP_printk(" DEV_ALIAS_RANGE\t\t "
+ "devid: %04x:%02x:%02x.%x flags: %02x "
+ "devid_to: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags,
+ seg_id, PCI_BUS_NUM(e->ext >> 8),
+ PCI_SLOT(e->ext >> 8),
+ PCI_FUNC(e->ext >> 8));
+
+ devid_start = e->devid;
+ flags = e->flags;
+ devid_to = e->ext >> 8;
+ ext_flags = 0;
+ alias = true;
+ break;
+ case IVHD_DEV_EXT_SELECT:
+
+ DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
+ "flags: %02x ext: %08x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags, e->ext);
+
+ devid = e->devid;
+ set_dev_entry_from_acpi(iommu, devid, e->flags,
+ e->ext);
+ break;
+ case IVHD_DEV_EXT_SELECT_RANGE:
+
+ DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
+ "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags, e->ext);
+
+ devid_start = e->devid;
+ flags = e->flags;
+ ext_flags = e->ext;
+ alias = false;
+ break;
+ case IVHD_DEV_RANGE_END:
+
+ DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid));
+
+ devid = e->devid;
+ for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
+ if (alias) {
+ pci_seg->alias_table[dev_i] = devid_to;
+ set_dev_entry_from_acpi(iommu,
+ devid_to, flags, ext_flags);
+ }
+ set_dev_entry_from_acpi(iommu, dev_i,
+ flags, ext_flags);
+ }
+ break;
+ case IVHD_DEV_SPECIAL: {
+ u8 handle, type;
+ const char *var;
+ u32 devid;
+ int ret;
+
+ handle = e->ext & 0xff;
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
+ type = (e->ext >> 24) & 0xff;
+
+ if (type == IVHD_SPECIAL_IOAPIC)
+ var = "IOAPIC";
+ else if (type == IVHD_SPECIAL_HPET)
+ var = "HPET";
+ else
+ var = "UNKNOWN";
+
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
+ var, (int)handle,
+ seg_id, PCI_BUS_NUM(devid),
+ PCI_SLOT(devid),
+ PCI_FUNC(devid));
+
+ ret = add_special_device(type, handle, &devid, false);
+ if (ret)
+ return ret;
+
+ /*
+ * add_special_device might update the devid in case a
+ * command-line override is present. So call
+ * set_dev_entry_from_acpi after add_special_device.
+ */
+ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
+ break;
+ }
+ case IVHD_DEV_ACPI_HID: {
+ u32 devid;
+ u8 hid[ACPIHID_HID_LEN];
+ u8 uid[ACPIHID_UID_LEN];
+ int ret;
+
+ if (h->type != 0x40) {
+ pr_err(FW_BUG "Invalid IVHD device type %#x\n",
+ e->type);
+ break;
+ }
+
+ BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
+ memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
+ hid[ACPIHID_HID_LEN - 1] = '\0';
+
+ if (!(*hid)) {
+ pr_err(FW_BUG "Invalid HID.\n");
+ break;
+ }
+
+ uid[0] = '\0';
+ switch (e->uidf) {
+ case UID_NOT_PRESENT:
+
+ if (e->uidl != 0)
+ pr_warn(FW_BUG "Invalid UID length.\n");
+
+ break;
+ case UID_IS_INTEGER:
+
+ sprintf(uid, "%d", e->uid);
+
+ break;
+ case UID_IS_CHARACTER:
+
+ memcpy(uid, &e->uid, e->uidl);
+ uid[e->uidl] = '\0';
+
+ break;
+ default:
+ break;
+ }
+
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ hid, uid, seg_id,
+ PCI_BUS_NUM(devid),
+ PCI_SLOT(devid),
+ PCI_FUNC(devid));
+
+ flags = e->flags;
+
+ ret = add_acpi_hid_device(hid, uid, &devid, false);
+ if (ret)
+ return ret;
+
+			/*
+			 * add_acpi_hid_device might update the devid in case a
+			 * command-line override is present. So call
+			 * set_dev_entry_from_acpi after add_acpi_hid_device.
+			 */
+ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ p += ivhd_entry_length(p);
+ }
+
+ return 0;
+}
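+
+/*
+ * Illustrative note (not driver logic, derived from the parsing loop above):
+ * a range is described by a DEV_SELECT_RANGE_START, DEV_ALIAS_RANGE or
+ * DEV_EXT_SELECT_RANGE entry that latches devid_start, flags and ext_flags,
+ * followed later by a DEV_RANGE_END entry whose devid closes the range; only
+ * then are the latched flags applied to every device id in between.
+ */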
+
+/* Allocate PCI segment data structure */
+static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ int last_bdf;
+
+	/*
+	 * First parse the ACPI table to find the largest Bus/Dev/Func we need
+	 * to handle in this PCI segment. Based on this information the shared
+	 * data structures for the PCI segment are then allocated.
+	 */
+ last_bdf = find_last_devid_acpi(ivrs_base, id);
+ if (last_bdf < 0)
+ return NULL;
+
+ pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
+ if (pci_seg == NULL)
+ return NULL;
+
+ pci_seg->last_bdf = last_bdf;
+ DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
+ pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+
+ pci_seg->id = id;
+ init_llist_head(&pci_seg->dev_data_list);
+ INIT_LIST_HEAD(&pci_seg->unity_map);
+ list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
+
+ if (alloc_dev_table(pci_seg))
+ return NULL;
+ if (alloc_alias_table(pci_seg))
+ return NULL;
+ if (alloc_rlookup_table(pci_seg))
+ return NULL;
+
+ return pci_seg;
+}
+
+static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == id)
+ return pci_seg;
+ }
+
+ return alloc_pci_segment(id, ivrs_base);
+}
+
+static void __init free_pci_segments(void)
+{
+ struct amd_iommu_pci_seg *pci_seg, *next;
+
+ for_each_pci_segment_safe(pci_seg, next) {
+ list_del(&pci_seg->list);
+ free_irq_lookup_table(pci_seg);
+ free_rlookup_table(pci_seg);
+ free_alias_table(pci_seg);
+ free_dev_table(pci_seg);
+ kfree(pci_seg);
+ }
+}
+
+static void __init free_iommu_one(struct amd_iommu *iommu)
+{
+ free_cwwb_sem(iommu);
+ free_command_buffer(iommu);
+ free_event_buffer(iommu);
+ free_ppr_log(iommu);
+ free_ga_log(iommu);
+ iommu_unmap_mmio_space(iommu);
+}
+
+static void __init free_iommu_all(void)
+{
+ struct amd_iommu *iommu, *next;
+
+ for_each_iommu_safe(iommu, next) {
+ list_del(&iommu->list);
+ free_iommu_one(iommu);
+ kfree(iommu);
+ }
+}
+
+/*
+ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
+ * Workaround:
+ * BIOS should disable L2B miscellaneous clock gating by setting
+ * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
+ */
+static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x10) ||
+ (boot_cpu_data.x86_model > 0x1f))
+ return;
+
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+ pci_read_config_dword(iommu->dev, 0xf4, &value);
+
+ if (value & BIT(2))
+ return;
+
+ /* Select NB indirect register 0x90 and enable writing */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
+
+ pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
+ pci_info(iommu->dev, "Applying erratum 746 workaround\n");
+
+ /* Clear the enable writing bit */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+}
+
+/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ * BIOS should enable ATS write permission check by setting
+ * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x30) ||
+ (boot_cpu_data.x86_model > 0x3f))
+ return;
+
+ /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+ value = iommu_read_l2(iommu, 0x47);
+
+ if (value & BIT(0))
+ return;
+
+ /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+ iommu_write_l2(iommu, 0x47, value | BIT(0));
+
+ pci_info(iommu->dev, "Applying ATS write check workaround\n");
+}
+
+/*
+ * This function glues the initialization of one IOMMU together and also
+ * allocates the command buffer and programs the hardware. It does NOT
+ * enable the IOMMU. This is done afterwards.
+ */
+static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+ iommu->pci_seg = pci_seg;
+
+ raw_spin_lock_init(&iommu->lock);
+ atomic64_set(&iommu->cmd_sem_val, 0);
+
+ /* Add IOMMU to internal data structures */
+ list_add_tail(&iommu->list, &amd_iommu_list);
+ iommu->index = amd_iommus_present++;
+
+ if (unlikely(iommu->index >= MAX_IOMMUS)) {
+ WARN(1, "System has more IOMMUs than supported by this driver\n");
+ return -ENOSYS;
+ }
+
+ /* Index is fine - add IOMMU to the array */
+ amd_iommus[iommu->index] = iommu;
+
+ /*
+ * Copy data from ACPI table entry to the iommu struct
+ */
+ iommu->devid = h->devid;
+ iommu->cap_ptr = h->cap_ptr;
+ iommu->mmio_phys = h->mmio_phys;
+
+ switch (h->type) {
+ case 0x10:
+ /* Check if IVHD EFR contains proper max banks/counters */
+ if ((h->efr_attr != 0) &&
+ ((h->efr_attr & (0xF << 13)) != 0) &&
+ ((h->efr_attr & (0x3F << 17)) != 0))
+ iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+ else
+ iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+
+		/*
+		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+		 * GAM also requires GA mode. Therefore, we need to
+		 * check cmpxchg16b support before enabling it.
+		 */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ case 0x11:
+ case 0x40:
+ if (h->efr_reg & (1 << 9))
+ iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+ else
+ iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+
+		/*
+		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+		 * XT and GAM also require GA mode. Therefore, we need to
+		 * check cmpxchg16b support before enabling them.
+		 */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ }
+
+ if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
+ amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+ early_iommu_features_init(iommu, h);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
+ iommu->mmio_phys_end);
+ if (!iommu->mmio_base)
+ return -ENOMEM;
+
+ return init_iommu_from_acpi(iommu, h);
+}
+
+static int __init init_iommu_one_late(struct amd_iommu *iommu)
+{
+ int ret;
+
+ if (alloc_cwwb_sem(iommu))
+ return -ENOMEM;
+
+ if (alloc_command_buffer(iommu))
+ return -ENOMEM;
+
+ if (alloc_event_buffer(iommu))
+ return -ENOMEM;
+
+ iommu->int_enabled = false;
+
+ init_translation_status(iommu);
+ if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
+ iommu_disable(iommu);
+ clear_translation_pre_enabled(iommu);
+ pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
+ iommu->index);
+ }
+ if (amd_iommu_pre_enabled)
+ amd_iommu_pre_enabled = translation_pre_enabled(iommu);
+
+ if (amd_iommu_irq_remap) {
+ ret = amd_iommu_create_irq_domain(iommu);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Make sure IOMMU is not considered to translate itself. The IVRS
+ * table tells us so, but this is a lie!
+ */
+ iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
+
+ return 0;
+}
+
+/**
+ * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
+ * @ivrs: Pointer to the IVRS header
+ *
+ * This function searches through all IVHD blocks and returns the highest
+ * supported IVHD type found.
+ */
+static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
+{
+ u8 *base = (u8 *)ivrs;
+ struct ivhd_header *ivhd = (struct ivhd_header *)
+ (base + IVRS_HEADER_LENGTH);
+ u8 last_type = ivhd->type;
+ u16 devid = ivhd->devid;
+
+ while (((u8 *)ivhd - base < ivrs->length) &&
+ (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
+ u8 *p = (u8 *) ivhd;
+
+ if (ivhd->devid == devid)
+ last_type = ivhd->type;
+ ivhd = (struct ivhd_header *)(p + ivhd->length);
+ }
+
+ return last_type;
+}
+
+/*
+ * Iterates over all IOMMU entries in the ACPI table, allocates the
+ * IOMMU structure and initializes it with init_iommu_one()
+ */
+static int __init init_iommu_all(struct acpi_table_header *table)
+{
+ u8 *p = (u8 *)table, *end = (u8 *)table;
+ struct ivhd_header *h;
+ struct amd_iommu *iommu;
+ int ret;
+
+ end += table->length;
+ p += IVRS_HEADER_LENGTH;
+
+ /* Phase 1: Process all IVHD blocks */
+ while (p < end) {
+ h = (struct ivhd_header *)p;
+ if (*p == amd_iommu_target_ivhd_type) {
+
+ DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
+ "flags: %01x info %04x\n",
+ h->pci_seg, PCI_BUS_NUM(h->devid),
+ PCI_SLOT(h->devid), PCI_FUNC(h->devid),
+ h->cap_ptr, h->flags, h->info);
+ DUMP_printk(" mmio-addr: %016llx\n",
+ h->mmio_phys);
+
+ iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
+ if (iommu == NULL)
+ return -ENOMEM;
+
+ ret = init_iommu_one(iommu, h, table);
+ if (ret)
+ return ret;
+ }
+ p += h->length;
+
+ }
+ WARN_ON(p != end);
+
+ /* Phase 2 : Early feature support check */
+ get_global_efr();
+
+ /* Phase 3 : Enabling IOMMU features */
+ for_each_iommu(iommu) {
+ ret = init_iommu_one_late(iommu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+{
+ u64 val;
+ struct pci_dev *pdev = iommu->dev;
+
+ if (!iommu_feature(iommu, FEATURE_PC))
+ return;
+
+ amd_iommu_pc_present = true;
+
+ pci_info(pdev, "IOMMU performance counters supported\n");
+
+ val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
+ iommu->max_banks = (u8) ((val >> 12) & 0x3f);
+ iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+ return;
+}
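+
+/*
+ * Illustrative example (not driver logic, derived from the decode above): if
+ * the counter configuration register reads 0x00044200, then max_banks =
+ * (0x00044200 >> 12) & 0x3f = 4 and max_counters = (0x00044200 >> 7) & 0xf
+ * = 4, i.e. four banks of four counters each.
+ */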
+
+static ssize_t amd_iommu_show_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_iommu *iommu = dev_to_amd_iommu(dev);
+ return sysfs_emit(buf, "%x\n", iommu->cap);
+}
+static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
+
+static ssize_t amd_iommu_show_features(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_iommu *iommu = dev_to_amd_iommu(dev);
+ return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+}
+static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
+
+static struct attribute *amd_iommu_attrs[] = {
+ &dev_attr_cap.attr,
+ &dev_attr_features.attr,
+ NULL,
+};
+
+static struct attribute_group amd_iommu_group = {
+ .name = "amd-iommu",
+ .attrs = amd_iommu_attrs,
+};
+
+static const struct attribute_group *amd_iommu_groups[] = {
+ &amd_iommu_group,
+ NULL,
+};
+
+/*
+ * Note: IVHD 0x11 and 0x40 also contains exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+ u64 features, features2;
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return;
+
+ /* read extended feature bits */
+ features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
+
+ if (!iommu->features) {
+ iommu->features = features;
+ iommu->features2 = features2;
+ return;
+ }
+
+ /*
+ * Sanity check and warn if EFR values from
+ * IVHD and MMIO conflict.
+ */
+ if (features != iommu->features ||
+ features2 != iommu->features2) {
+ pr_warn(FW_WARN
+ "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
+ features, iommu->features,
+ features2, iommu->features2);
+ }
+}
+
+static int __init iommu_init_pci(struct amd_iommu *iommu)
+{
+ int cap_ptr = iommu->cap_ptr;
+ int ret;
+
+ iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid),
+ iommu->devid & 0xff);
+ if (!iommu->dev)
+ return -ENODEV;
+
+ /* Prevent binding other PCI device drivers to IOMMU devices */
+ iommu->dev->match_driver = false;
+
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+ &iommu->cap);
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+ amd_iommu_iotlb_sup = false;
+
+ late_iommu_features_init(iommu);
+
+ if (iommu_feature(iommu, FEATURE_GT)) {
+ int glxval;
+ u32 max_pasid;
+ u64 pasmax;
+
+ pasmax = iommu->features & FEATURE_PASID_MASK;
+ pasmax >>= FEATURE_PASID_SHIFT;
+ max_pasid = (1 << (pasmax + 1)) - 1;
+
+ amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
+
+ BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
+
+ glxval = iommu->features & FEATURE_GLXVAL_MASK;
+ glxval >>= FEATURE_GLXVAL_SHIFT;
+
+ if (amd_iommu_max_glx_val == -1)
+ amd_iommu_max_glx_val = glxval;
+ else
+ amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+ }
+
+ if (iommu_feature(iommu, FEATURE_GT) &&
+ iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->is_iommu_v2 = true;
+ amd_iommu_v2_present = true;
+ }
+
+ if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+ return -ENOMEM;
+
+ if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
+ pr_info("Using strict mode due to virtualization\n");
+ iommu_set_dma_strict();
+ amd_iommu_np_cache = true;
+ }
+
+ init_iommu_perf_ctr(iommu);
+
+ if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
+ !iommu_feature(iommu, FEATURE_GT)) {
+ pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (iommu_default_passthrough()) {
+ pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+ }
+
+ if (is_rd890_iommu(iommu->dev)) {
+ int i, j;
+
+ iommu->root_pdev =
+ pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ iommu->dev->bus->number,
+ PCI_DEVFN(0, 0));
+
+ /*
+ * Some rd890 systems may not be fully reconfigured by the
+ * BIOS, so it's necessary for us to store this information so
+ * it can be reprogrammed on resume
+ */
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ &iommu->stored_addr_lo);
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+ &iommu->stored_addr_hi);
+
+ /* Low bit locks writes to configuration space */
+ iommu->stored_addr_lo &= ~1;
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 0x12; j++)
+ iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+ for (i = 0; i < 0x83; i++)
+ iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+ }
+
+ amd_iommu_erratum_746_workaround(iommu);
+ amd_iommu_ats_write_check_workaround(iommu);
+
+ ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+ amd_iommu_groups, "ivhd%d", iommu->index);
+ if (ret)
+ return ret;
+
+ iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
+
+ return pci_enable_device(iommu->dev);
+}
+
+static void print_iommu_info(void)
+{
+ static const char * const feat_str[] = {
+ "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
+ "IA", "GA", "HE", "PC"
+ };
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ struct pci_dev *pdev = iommu->dev;
+ int i;
+
+ pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
+
+ if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+ pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
+
+ for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+ if (iommu_feature(iommu, (1ULL << i)))
+ pr_cont(" %s", feat_str[i]);
+ }
+
+ if (iommu->features & FEATURE_GAM_VAPIC)
+ pr_cont(" GA_vAPIC");
+
+ if (iommu->features & FEATURE_SNP)
+ pr_cont(" SNP");
+
+ pr_cont("\n");
+ }
+ }
+ if (irq_remapping_enabled) {
+ pr_info("Interrupt remapping enabled\n");
+ if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ pr_info("X2APIC enabled\n");
+ }
+ if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ pr_info("V2 page table enabled (Paging mode : %d level)\n",
+ amd_iommu_gpt_level);
+ }
+}
+
+static int __init amd_iommu_init_pci(void)
+{
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+ int ret;
+
+ for_each_iommu(iommu) {
+ ret = iommu_init_pci(iommu);
+ if (ret) {
+ pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
+ iommu->index, ret);
+ goto out;
+ }
+ /* Need to setup range after PCI init */
+ iommu_set_cwwb_range(iommu);
+ }
+
+ /*
+ * Order is important here to make sure any unity map requirements are
+ * fulfilled. The unity mappings are created and written to the device
+ * table during the iommu_init_pci() call.
+ *
+ * After that we call init_device_table_dma() to make sure any
+ * uninitialized DTE will block DMA, and in the end we flush the caches
+ * of all IOMMUs to make sure the changes to the device table are
+ * active.
+ */
+ for_each_pci_segment(pci_seg)
+ init_device_table_dma(pci_seg);
+
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+
+ print_iommu_info();
+
+out:
+ return ret;
+}
+
+/****************************************************************************
+ *
+ * The following functions initialize the MSI interrupts for all IOMMUs
+ * in the system. It's a bit challenging because there could be multiple
+ * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
+ * pci_dev.
+ *
+ ****************************************************************************/
+
+static int iommu_setup_msi(struct amd_iommu *iommu)
+{
+ int r;
+
+ r = pci_enable_msi(iommu->dev);
+ if (r)
+ return r;
+
+ r = request_threaded_irq(iommu->dev->irq,
+ amd_iommu_int_handler,
+ amd_iommu_int_thread,
+ 0, "AMD-Vi",
+ iommu);
+
+ if (r) {
+ pci_disable_msi(iommu->dev);
+ return r;
+ }
+
+ return 0;
+}
+
+union intcapxt {
+ u64 capxt;
+ struct {
+ u64 reserved_0 : 2,
+ dest_mode_logical : 1,
+ reserved_1 : 5,
+ destid_0_23 : 24,
+ vector : 8,
+ reserved_2 : 16,
+ destid_24_31 : 8;
+ };
+} __attribute__ ((packed));
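+
+/*
+ * Illustrative example (not driver logic, derived from the bitfield layout
+ * above): for a destination APIC ID of 0x12345678 and vector 0x2f, the
+ * unmask path below sets destid_0_23 = 0x345678, destid_24_31 = 0x12 and
+ * vector = 0x2f before writing the composed 64-bit value to the interrupt
+ * capability register selected by irqd->hwirq.
+ */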
+
+
+static struct irq_chip intcapxt_controller;
+
+static int intcapxt_irqdomain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ return 0;
+}
+
+static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
+ struct irq_data *irqd)
+{
+}
+
+
+static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ int i, ret;
+
+ if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
+ return -EINVAL;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ for (i = virq; i < virq + nr_irqs; i++) {
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
+
+ irqd->chip = &intcapxt_controller;
+ irqd->hwirq = info->hwirq;
+ irqd->chip_data = info->data;
+ __irq_set_handler(i, handle_edge_irq, 0, "edge");
+ }
+
+ return ret;
+}
+
+static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ irq_domain_free_irqs_top(domain, virq, nr_irqs);
+}
+
+
+static void intcapxt_unmask_irq(struct irq_data *irqd)
+{
+ struct amd_iommu *iommu = irqd->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(irqd);
+ union intcapxt xt;
+
+ xt.capxt = 0ULL;
+ xt.dest_mode_logical = apic->dest_mode_logical;
+ xt.vector = cfg->vector;
+ xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
+ xt.destid_24_31 = cfg->dest_apicid >> 24;
+
+ writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
+}
+
+static void intcapxt_mask_irq(struct irq_data *irqd)
+{
+ struct amd_iommu *iommu = irqd->chip_data;
+
+ writeq(0, iommu->mmio_base + irqd->hwirq);
+}
+
+
+static int intcapxt_set_affinity(struct irq_data *irqd,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_data *parent = irqd->parent_data;
+ int ret;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+ return ret;
+ return 0;
+}
+
+static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
+{
+ return on ? -EOPNOTSUPP : 0;
+}
+
+static struct irq_chip intcapxt_controller = {
+ .name = "IOMMU-MSI",
+ .irq_unmask = intcapxt_unmask_irq,
+ .irq_mask = intcapxt_mask_irq,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = intcapxt_set_affinity,
+ .irq_set_wake = intcapxt_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static const struct irq_domain_ops intcapxt_domain_ops = {
+ .alloc = intcapxt_irqdomain_alloc,
+ .free = intcapxt_irqdomain_free,
+ .activate = intcapxt_irqdomain_activate,
+ .deactivate = intcapxt_irqdomain_deactivate,
+};
+
+
+static struct irq_domain *iommu_irqdomain;
+
+static struct irq_domain *iommu_get_irqdomain(void)
+{
+ struct fwnode_handle *fn;
+
+ /* No need for locking here (yet) as the init is single-threaded */
+ if (iommu_irqdomain)
+ return iommu_irqdomain;
+
+ fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
+ if (!fn)
+ return NULL;
+
+ iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
+ fn, &intcapxt_domain_ops,
+ NULL);
+ if (!iommu_irqdomain)
+ irq_domain_free_fwnode(fn);
+
+ return iommu_irqdomain;
+}
+
+static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
+ int hwirq, irq_handler_t thread_fn)
+{
+ struct irq_domain *domain;
+ struct irq_alloc_info info;
+ int irq, ret;
+ int node = dev_to_node(&iommu->dev->dev);
+
+ domain = iommu_get_irqdomain();
+ if (!domain)
+ return -ENXIO;
+
+ init_irq_alloc_info(&info, NULL);
+ info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
+ info.data = iommu;
+ info.hwirq = hwirq;
+
+ irq = irq_domain_alloc_irqs(domain, 1, node, &info);
+ if (irq < 0) {
+ irq_domain_remove(domain);
+ return irq;
+ }
+
+ ret = request_threaded_irq(irq, amd_iommu_int_handler,
+ thread_fn, 0, devname, iommu);
+ if (ret) {
+ irq_domain_free_irqs(irq, 1);
+ irq_domain_remove(domain);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iommu_setup_intcapxt(struct amd_iommu *iommu)
+{
+ int ret;
+
+ snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
+ "AMD-Vi%d-Evt", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
+ MMIO_INTCAPXT_EVT_OFFSET,
+ amd_iommu_int_thread_evtlog);
+ if (ret)
+ return ret;
+
+ snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
+ "AMD-Vi%d-PPR", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
+ MMIO_INTCAPXT_PPR_OFFSET,
+ amd_iommu_int_thread_pprlog);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_IRQ_REMAP
+ snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
+ "AMD-Vi%d-GA", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
+ MMIO_INTCAPXT_GALOG_OFFSET,
+ amd_iommu_int_thread_galog);
+#endif
+
+ return ret;
+}
+
+static int iommu_init_irq(struct amd_iommu *iommu)
+{
+ int ret;
+
+ if (iommu->int_enabled)
+ goto enable_faults;
+
+ if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ ret = iommu_setup_intcapxt(iommu);
+ else if (iommu->dev->msi_cap)
+ ret = iommu_setup_msi(iommu);
+ else
+ ret = -ENODEV;
+
+ if (ret)
+ return ret;
+
+ iommu->int_enabled = true;
+enable_faults:
+
+ if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+
+ iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the third pass of parsing the ACPI
+ * table. In this last pass the memory mapping requirements are
+ * gathered (like exclusion and unity mapping ranges).
+ *
+ ****************************************************************************/
+
+static void __init free_unity_maps(void)
+{
+ struct unity_map_entry *entry, *next;
+ struct amd_iommu_pci_seg *p, *pci_seg;
+
+ for_each_pci_segment_safe(pci_seg, p) {
+ list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+}
+
+/* called for unity map ACPI definition */
+static int __init init_unity_map_range(struct ivmd_header *m,
+ struct acpi_table_header *ivrs_base)
+{
+ struct unity_map_entry *e = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
+ char *s;
+
+ pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ switch (m->type) {
+ default:
+ kfree(e);
+ return 0;
+ case ACPI_IVMD_TYPE:
+ s = "IVMD_TYPEi\t\t\t";
+ e->devid_start = e->devid_end = m->devid;
+ break;
+ case ACPI_IVMD_TYPE_ALL:
+ s = "IVMD_TYPE_ALL\t\t";
+ e->devid_start = 0;
+ e->devid_end = pci_seg->last_bdf;
+ break;
+ case ACPI_IVMD_TYPE_RANGE:
+ s = "IVMD_TYPE_RANGE\t\t";
+ e->devid_start = m->devid;
+ e->devid_end = m->aux;
+ break;
+ }
+ e->address_start = PAGE_ALIGN(m->range_start);
+ e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
+ e->prot = m->flags >> 1;
+
+	/*
+	 * Treat per-device exclusion ranges as r/w unity-mapped regions
+	 * since some buggy BIOSes overwrite the exclusion range
+	 * (exclusion_start and exclusion_length members) when there are
+	 * multiple exclusion ranges (IVMD entries) defined in the ACPI table.
+	 */
+ if (m->flags & IVMD_FLAG_EXCL_RANGE)
+ e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
+
+ DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
+ "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
+ " flags: %x\n", s, m->pci_seg,
+ PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
+ PCI_FUNC(e->devid_start), m->pci_seg,
+ PCI_BUS_NUM(e->devid_end),
+ PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
+ e->address_start, e->address_end, m->flags);
+
+ list_add_tail(&e->list, &pci_seg->unity_map);
+
+ return 0;
+}
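+
+/*
+ * Illustrative note (a hedged sketch, assuming the usual IVMD flag layout
+ * with the read/write permission bits directly above the unity-map bit):
+ * the "m->flags >> 1" above drops the unity-map bit so that the IVMD IR/IW
+ * permissions line up with the driver's protection bits, and the
+ * exclusion-range fallback simply forces read/write permission.
+ */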
+
+/* iterates over all memory definitions we find in the ACPI table */
+static int __init init_memory_definitions(struct acpi_table_header *table)
+{
+ u8 *p = (u8 *)table, *end = (u8 *)table;
+ struct ivmd_header *m;
+
+ end += table->length;
+ p += IVRS_HEADER_LENGTH;
+
+ while (p < end) {
+ m = (struct ivmd_header *)p;
+ if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
+ init_unity_map_range(m, table);
+
+ p += m->length;
+ }
+
+ return 0;
+}
+
+/*
+ * Init the device table to not allow DMA access for devices
+ */
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
+{
+ u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+ if (dev_table == NULL)
+ return;
+
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ if (!amd_iommu_snp_en)
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
+ }
+}
+
+static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
+{
+ u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+ if (dev_table == NULL)
+ return;
+
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ dev_table[devid].data[0] = 0ULL;
+ dev_table[devid].data[1] = 0ULL;
+ }
+}
+
+static void init_device_table(void)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ u32 devid;
+
+ if (!amd_iommu_irq_remap)
+ return;
+
+ for_each_pci_segment(pci_seg) {
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
+ __set_dev_entry_bit(pci_seg->dev_table,
+ devid, DEV_ENTRY_IRQ_TBL_EN);
+ }
+}
+
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+ iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+ iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+ iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+ /*
+ * make IOMMU memory accesses cache coherent
+ */
+ iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+ /* Set IOTLB invalidation timeout to 1s */
+ iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
+}
+
+static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+{
+ int i, j;
+ u32 ioc_feature_control;
+ struct pci_dev *pdev = iommu->root_pdev;
+
+ /* RD890 BIOSes may not have completely reconfigured the iommu */
+ if (!is_rd890_iommu(iommu->dev) || !pdev)
+ return;
+
+ /*
+ * First, we need to ensure that the iommu is enabled. This is
+ * controlled by a register in the northbridge
+ */
+
+ /* Select Northbridge indirect register 0x75 and enable writing */
+ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
+ pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
+
+ /* Enable the iommu */
+ if (!(ioc_feature_control & 0x1))
+ pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
+
+ /* Restore the iommu BAR */
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ iommu->stored_addr_lo);
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
+ iommu->stored_addr_hi);
+
+ /* Restore the l1 indirect regs for each of the 6 l1s */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 0x12; j++)
+ iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
+
+ /* Restore the l2 indirect regs */
+ for (i = 0; i < 0x83; i++)
+ iommu_write_l2(iommu, i, iommu->stored_l2[i]);
+
+ /* Lock PCI setup registers */
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ iommu->stored_addr_lo | 1);
+}
+
+static void iommu_enable_ga(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ switch (amd_iommu_guest_ir) {
+ case AMD_IOMMU_GUEST_IR_VAPIC:
+ case AMD_IOMMU_GUEST_IR_LEGACY_GA:
+ iommu_feature_enable(iommu, CONTROL_GA_EN);
+ iommu->irte_ops = &irte_128_ops;
+ break;
+ default:
+ iommu->irte_ops = &irte_32_ops;
+ break;
+ }
+#endif
+}
+
+static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
+}
+
+static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
+{
+ u64 ctrl;
+
+ if (!amd_iommu_irtcachedis)
+ return;
+
+	/*
+	 * Note:
+	 * The support for the IRTCacheDis feature is determined by
+	 * checking whether the bit is writable.
+	 */
+ iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
+ if (ctrl)
+ iommu->irtcachedis_enabled = true;
+ pr_info("iommu%d (%#06x) : IRT cache is %s\n",
+ iommu->index, iommu->devid,
+ iommu->irtcachedis_enabled ? "disabled" : "enabled");
+}
+
+static void early_enable_iommu(struct amd_iommu *iommu)
+{
+ iommu_disable(iommu);
+ iommu_init_flags(iommu);
+ iommu_set_device_table(iommu);
+ iommu_enable_command_buffer(iommu);
+ iommu_enable_event_buffer(iommu);
+ iommu_set_exclusion_range(iommu);
+ iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
+ iommu_enable_irtcachedis(iommu);
+ iommu_enable(iommu);
+ iommu_flush_all_caches(iommu);
+}
+
+/*
+ * This function finally enables all IOMMUs found in the system after
+ * they have been initialized.
+ *
+ * If running in a kdump kernel with all IOMMUs pre-enabled, it tries to copy
+ * the old content of the device table entries. If that is not the case, or
+ * if the copy fails, it just continues as a normal kernel would.
+ */
+static void early_enable_iommus(void)
+{
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ if (!copy_device_table()) {
+		/*
+		 * If we get here because copying the device table from the
+		 * old kernel failed while all IOMMUs were enabled, print an
+		 * error message and try to free the allocated old_dev_tbl_cpy.
+		 */
+ if (amd_iommu_pre_enabled)
+ pr_err("Failed to copy DEV table from previous kernel.\n");
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->old_dev_tbl_cpy != NULL) {
+ free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = NULL;
+ }
+ }
+
+ for_each_iommu(iommu) {
+ clear_translation_pre_enabled(iommu);
+ early_enable_iommu(iommu);
+ }
+ } else {
+ pr_info("Copied DEV table from previous kernel.\n");
+
+ for_each_pci_segment(pci_seg) {
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
+ }
+
+ for_each_iommu(iommu) {
+ iommu_disable_command_buffer(iommu);
+ iommu_disable_event_buffer(iommu);
+ iommu_disable_irtcachedis(iommu);
+ iommu_enable_command_buffer(iommu);
+ iommu_enable_event_buffer(iommu);
+ iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
+ iommu_enable_irtcachedis(iommu);
+ iommu_set_device_table(iommu);
+ iommu_flush_all_caches(iommu);
+ }
+ }
+}
+
+static void enable_iommus_v2(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ iommu_enable_ppr_log(iommu);
+ iommu_enable_gt(iommu);
+ }
+}
+
+static void enable_iommus_vapic(void)
+{
+#ifdef CONFIG_IRQ_REMAP
+ u32 status, i;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ /*
+ * Disable GALog if already running. It could have been enabled
+ * in the previous boot before kdump.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ continue;
+
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+		/*
+		 * Poll and wait for the GALogRun bit to read as zero before
+		 * we can safely set/modify the GA Log registers.
+		 */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ break;
+ udelay(10);
+ }
+
+ if (WARN_ON(i >= LOOP_TIMEOUT))
+ return;
+ }
+
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ if (amd_iommu_snp_en &&
+ !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
+ pr_warn("Force to disable Virtual APIC due to SNP\n");
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ /* Enabling GAM and SNPAVIC support */
+ for_each_iommu(iommu) {
+ if (iommu_init_ga_log(iommu) ||
+ iommu_ga_log_enable(iommu))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GAM_EN);
+ if (amd_iommu_snp_en)
+ iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
+ }
+
+ amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
+ pr_info("Virtual APIC enabled\n");
+#endif
+}
+
+static void enable_iommus(void)
+{
+ early_enable_iommus();
+}
+
+static void disable_iommus(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ iommu_disable(iommu);
+
+#ifdef CONFIG_IRQ_REMAP
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+ amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
+#endif
+}
+
+/*
+ * Suspend/Resume support
+ * disable suspend until real resume implemented
+ */
+
+static void amd_iommu_resume(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ iommu_apply_resume_quirks(iommu);
+
+ /* re-load the hardware */
+ enable_iommus();
+
+ amd_iommu_enable_interrupts();
+}
+
+static int amd_iommu_suspend(void)
+{
+ /* disable IOMMUs to go out of the way for BIOS */
+ disable_iommus();
+
+ return 0;
+}
+
+static struct syscore_ops amd_iommu_syscore_ops = {
+ .suspend = amd_iommu_suspend,
+ .resume = amd_iommu_resume,
+};
+
+static void __init free_iommu_resources(void)
+{
+ kmem_cache_destroy(amd_iommu_irq_cache);
+ amd_iommu_irq_cache = NULL;
+
+ free_iommu_all();
+ free_pci_segments();
+}
+
+/* SB IOAPIC is always on this device in AMD systems */
+#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
+
+static bool __init check_ioapic_information(void)
+{
+ const char *fw_bug = FW_BUG;
+ bool ret, has_sb_ioapic;
+ int idx;
+
+ has_sb_ioapic = false;
+ ret = false;
+
+ /*
+ * If there are map overrides on the kernel command line, the
+ * messages in this function might no longer describe firmware
+ * bugs - so be careful.
+ */
+ if (cmdline_maps)
+ fw_bug = "";
+
+ for (idx = 0; idx < nr_ioapics; idx++) {
+ int devid, id = mpc_ioapic_id(idx);
+
+ devid = get_ioapic_devid(id);
+ if (devid < 0) {
+ pr_err("%s: IOAPIC[%d] not in IVRS table\n",
+ fw_bug, id);
+ ret = false;
+ } else if (devid == IOAPIC_SB_DEVID) {
+ has_sb_ioapic = true;
+ ret = true;
+ }
+ }
+
+ if (!has_sb_ioapic) {
+ /*
+ * We expect the SB IOAPIC to be listed in the IVRS
+ * table. The system timer is connected to the SB IOAPIC,
+ * and if it is missing from the list the system will
+ * panic at boot time. This usually happens when the
+ * BIOS is buggy and provides the wrong device id for
+ * the IOAPIC in the system.
+ */
+ pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
+ }
+
+ if (!ret)
+ pr_err("Disabling interrupt remapping\n");
+
+ return ret;
+}
+
+static void __init free_dma_resources(void)
+{
+ free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+ get_order(MAX_DOMAIN_ID/8));
+ amd_iommu_pd_alloc_bitmap = NULL;
+
+ free_unity_maps();
+}
+
+static void __init ivinfo_init(void *ivrs)
+{
+ amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
+/*
+ * This is the hardware init function for the AMD IOMMUs in the system.
+ * It is called either from amd_iommu_init or from the interrupt
+ * remapping setup code.
+ *
+ * It parses the ACPI table for the AMD IOMMU (IVRS) four times:
+ *
+ * Pass 1) Discover the most comprehensive IVHD type to use.
+ *
+ * Pass 2) Find the highest PCI device id the driver has to handle.
+ * This determines the size of the data structures that need
+ * to be allocated.
+ *
+ * Pass 3) Initialize the data structures just allocated with the
+ * information in the ACPI table about the AMD IOMMUs available
+ * in the system. This pass also maps the PCI devices in the
+ * system to specific IOMMUs.
+ *
+ * Pass 4) With the basic data structures allocated and initialized,
+ * update them with information about memory remapping
+ * requirements parsed out of the ACPI table in this last pass.
+ *
+ * After everything is set up, the IOMMUs are enabled and the necessary
+ * hotplug and suspend notifiers are registered.
+ */
+static int __init early_amd_iommu_init(void)
+{
+ struct acpi_table_header *ivrs_base;
+ int remap_cache_sz, ret;
+ acpi_status status;
+
+ if (!amd_iommu_detected)
+ return -ENODEV;
+
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
+ if (status == AE_NOT_FOUND)
+ return -ENODEV;
+ else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+ pr_err("IVRS table error: %s\n", err);
+ return -EINVAL;
+ }
+
+ /*
+ * Validate checksum here so we don't need to do it when
+ * we actually parse the table
+ */
+ ret = check_ivrs_checksum(ivrs_base);
+ if (ret)
+ goto out;
+
+ ivinfo_init(ivrs_base);
+
+ amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
+ DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
+
+ /* Device table - directly used by all IOMMUs */
+ ret = -ENOMEM;
+
+ amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(MAX_DOMAIN_ID/8));
+ if (amd_iommu_pd_alloc_bitmap == NULL)
+ goto out;
+
+ /*
+ * Never allocate domain 0 because it is used as the non-allocated
+ * and error value placeholder.
+ */
+ __set_bit(0, amd_iommu_pd_alloc_bitmap);
+
+ /*
+ * Now that the data structures are allocated and basically
+ * initialized, start the real ACPI table scan.
+ */
+ ret = init_iommu_all(ivrs_base);
+ if (ret)
+ goto out;
+
+ /* 5 level guest page table */
+ if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+ check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL)
+ amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
+
+ /* Disable any previously enabled IOMMUs */
+ if (!is_kdump_kernel() || amd_iommu_disabled)
+ disable_iommus();
+
+ if (amd_iommu_irq_remap)
+ amd_iommu_irq_remap = check_ioapic_information();
+
+ if (amd_iommu_irq_remap) {
+ struct amd_iommu_pci_seg *pci_seg;
+ /*
+ * Interrupt remapping enabled, create kmem_cache for the
+ * remapping tables.
+ */
+ ret = -ENOMEM;
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
+ else
+ remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
+ amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
+ remap_cache_sz,
+ DTE_INTTAB_ALIGNMENT,
+ 0, NULL);
+ if (!amd_iommu_irq_cache)
+ goto out;
+
+ for_each_pci_segment(pci_seg) {
+ if (alloc_irq_lookup_table(pci_seg))
+ goto out;
+ }
+ }
+
+ ret = init_memory_definitions(ivrs_base);
+ if (ret)
+ goto out;
+
+ /* init the device table */
+ init_device_table();
+
+out:
+ /* Don't leak any ACPI memory */
+ acpi_put_table(ivrs_base);
+
+ return ret;
+}
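
One detail of the interrupt-remapping branch above is worth spelling out: the size of each cached remapping table depends on the IRTE format. Legacy interrupt remapping uses 32-bit entries, while the GA (guest virtual APIC) modes use the 128-bit entry format, so the kmem_cache objects are four times as large. A minimal restatement of that computation, with the entry count left symbolic:

        /* Legacy format: 32-bit IRTEs */
        remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);        /* N *  4 bytes */
        /* GA format: 128-bit IRTEs, i.e. four times as large */
        remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);  /* N * 16 bytes */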
+
+static int amd_iommu_enable_interrupts(void)
+{
+ struct amd_iommu *iommu;
+ int ret = 0;
+
+ for_each_iommu(iommu) {
+ ret = iommu_init_irq(iommu);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * The interrupt handler is ready to process interrupts. Enable
+ * the PPR and GA log interrupts for all IOMMUs.
+ */
+ enable_iommus_vapic();
+ enable_iommus_v2();
+
+out:
+ return ret;
+}
+
+static bool __init detect_ivrs(void)
+{
+ struct acpi_table_header *ivrs_base;
+ acpi_status status;
+ int i;
+
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
+ if (status == AE_NOT_FOUND)
+ return false;
+ else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+ pr_err("IVRS table error: %s\n", err);
+ return false;
+ }
+
+ acpi_put_table(ivrs_base);
+
+ if (amd_iommu_force_enable)
+ goto out;
+
+ /* Don't use IOMMU if there is Stoney Ridge graphics */
+ for (i = 0; i < 32; i++) {
+ u32 pci_id;
+
+ pci_id = read_pci_config(0, i, 0, 0);
+ if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
+ pr_info("Disable IOMMU on Stoney Ridge\n");
+ return false;
+ }
+ }
+
+out:
+ /* Make sure ACS will be enabled during PCI probe */
+ pci_request_acs();
+
+ return true;
+}
+
+/****************************************************************************
+ *
+ * AMD IOMMU Initialization State Machine
+ *
+ ****************************************************************************/
+
+static int __init state_next(void)
+{
+ int ret = 0;
+
+ switch (init_state) {
+ case IOMMU_START_STATE:
+ if (!detect_ivrs()) {
+ init_state = IOMMU_NOT_FOUND;
+ ret = -ENODEV;
+ } else {
+ init_state = IOMMU_IVRS_DETECTED;
+ }
+ break;
+ case IOMMU_IVRS_DETECTED:
+ if (amd_iommu_disabled) {
+ init_state = IOMMU_CMDLINE_DISABLED;
+ ret = -EINVAL;
+ } else {
+ ret = early_amd_iommu_init();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+ }
+ break;
+ case IOMMU_ACPI_FINISHED:
+ early_enable_iommus();
+ x86_platform.iommu_shutdown = disable_iommus;
+ init_state = IOMMU_ENABLED;
+ break;
+ case IOMMU_ENABLED:
+ register_syscore_ops(&amd_iommu_syscore_ops);
+ ret = amd_iommu_init_pci();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+ break;
+ case IOMMU_PCI_INIT:
+ ret = amd_iommu_enable_interrupts();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
+ break;
+ case IOMMU_INTERRUPTS_EN:
+ init_state = IOMMU_INITIALIZED;
+ break;
+ case IOMMU_INITIALIZED:
+ /* Nothing to do */
+ break;
+ case IOMMU_NOT_FOUND:
+ case IOMMU_INIT_ERROR:
+ case IOMMU_CMDLINE_DISABLED:
+ /* Error states => do nothing */
+ ret = -EINVAL;
+ break;
+ default:
+ /* Unknown state */
+ BUG();
+ }
+
+ if (ret) {
+ free_dma_resources();
+ if (!irq_remapping_enabled) {
+ disable_iommus();
+ free_iommu_resources();
+ } else {
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg)
+ uninit_device_table_dma(pci_seg);
+
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+ }
+ }
+ return ret;
+}
+
+static int __init iommu_go_to_state(enum iommu_init_state state)
+{
+ int ret = -EINVAL;
+
+ while (init_state != state) {
+ if (init_state == IOMMU_NOT_FOUND ||
+ init_state == IOMMU_INIT_ERROR ||
+ init_state == IOMMU_CMDLINE_DISABLED)
+ break;
+ ret = state_next();
+ }
+
+ return ret;
+}
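
The state machine above only ever moves forward: a caller names the state it needs and iommu_go_to_state() keeps invoking state_next() until that state, or one of the terminal error states, is reached. The call sites later in this file illustrate the pattern:

        /* IRQ-remapping setup only needs the ACPI parse to have finished ... */
        iommu_go_to_state(IOMMU_ACPI_FINISHED);    /* amd_iommu_prepare() */

        /* ... while the normal init path drives the machine all the way. */
        iommu_go_to_state(IOMMU_INITIALIZED);      /* amd_iommu_init()    */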
+
+#ifdef CONFIG_IRQ_REMAP
+int __init amd_iommu_prepare(void)
+{
+ int ret;
+
+ amd_iommu_irq_remap = true;
+
+ ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
+ if (ret) {
+ amd_iommu_irq_remap = false;
+ return ret;
+ }
+
+ return amd_iommu_irq_remap ? 0 : -ENODEV;
+}
+
+int __init amd_iommu_enable(void)
+{
+ int ret;
+
+ ret = iommu_go_to_state(IOMMU_ENABLED);
+ if (ret)
+ return ret;
+
+ irq_remapping_enabled = 1;
+ return amd_iommu_xt_mode;
+}
+
+void amd_iommu_disable(void)
+{
+ amd_iommu_suspend();
+}
+
+int amd_iommu_reenable(int mode)
+{
+ amd_iommu_resume();
+
+ return 0;
+}
+
+int __init amd_iommu_enable_faulting(void)
+{
+ /* We enable MSI later when PCI is initialized */
+ return 0;
+}
+#endif
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ */
+static int __init amd_iommu_init(void)
+{
+ struct amd_iommu *iommu;
+ int ret;
+
+ ret = iommu_go_to_state(IOMMU_INITIALIZED);
+#ifdef CONFIG_GART_IOMMU
+ if (ret && list_empty(&amd_iommu_list)) {
+ /*
+ * We failed to initialize the AMD IOMMU - try fallback
+ * to GART if possible.
+ */
+ gart_iommu_init();
+ }
+#endif
+
+ for_each_iommu(iommu)
+ amd_iommu_debugfs_setup(iommu);
+
+ return ret;
+}
+
+static bool amd_iommu_sme_check(void)
+{
+ if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
+ (boot_cpu_data.x86 != 0x17))
+ return true;
+
+ /* For Fam17h, a specific level of support is required */
+ if (boot_cpu_data.microcode >= 0x08001205)
+ return true;
+
+ if ((boot_cpu_data.microcode >= 0x08001126) &&
+ (boot_cpu_data.microcode <= 0x080011ff))
+ return true;
+
+ pr_notice("IOMMU not currently supported when SME is active\n");
+
+ return false;
+}
+
+/****************************************************************************
+ *
+ * Early detect code. This code runs at IOMMU detection time in the DMA
+ * layer. It just checks whether an IVRS ACPI table is present in order
+ * to detect AMD IOMMUs.
+ *
+ ****************************************************************************/
+int __init amd_iommu_detect(void)
+{
+ int ret;
+
+ if (no_iommu || (iommu_detected && !gart_iommu_aperture))
+ return -ENODEV;
+
+ if (!amd_iommu_sme_check())
+ return -ENODEV;
+
+ ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
+ if (ret)
+ return ret;
+
+ amd_iommu_detected = true;
+ iommu_detected = 1;
+ x86_init.iommu.iommu_init = amd_iommu_init;
+
+ return 1;
+}
+
+/****************************************************************************
+ *
+ * Parsing functions for the AMD IOMMU specific kernel command line
+ * options.
+ *
+ ****************************************************************************/
+
+static int __init parse_amd_iommu_dump(char *str)
+{
+ amd_iommu_dump = true;
+
+ return 1;
+}
+
+static int __init parse_amd_iommu_intr(char *str)
+{
+ for (; *str; ++str) {
+ if (strncmp(str, "legacy", 6) == 0) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ break;
+ }
+ if (strncmp(str, "vapic", 5) == 0) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+ break;
+ }
+ }
+ return 1;
+}
+
+static int __init parse_amd_iommu_options(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ while (*str) {
+ if (strncmp(str, "fullflush", 9) == 0) {
+ pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
+ iommu_set_dma_strict();
+ } else if (strncmp(str, "force_enable", 12) == 0) {
+ amd_iommu_force_enable = true;
+ } else if (strncmp(str, "off", 3) == 0) {
+ amd_iommu_disabled = true;
+ } else if (strncmp(str, "force_isolation", 15) == 0) {
+ amd_iommu_force_isolation = true;
+ } else if (strncmp(str, "pgtbl_v1", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V2;
+ } else if (strncmp(str, "irtcachedis", 11) == 0) {
+ amd_iommu_irtcachedis = true;
+ } else {
+ pr_notice("Unknown option - '%s'\n", str);
+ }
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
+ }
+
+ return 1;
+}
+
+static int __init parse_ivrs_ioapic(char *str)
+{
+ u32 seg = 0, bus, dev, fn;
+ int id, i;
+ u32 devid;
+
+ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
+ goto found;
+
+ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
+ pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
+ str, id, seg, bus, dev, fn);
+ goto found;
+ }
+
+ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+
+found:
+ if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+ pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+ str);
+ return 1;
+ }
+
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
+
+ cmdline_maps = true;
+ i = early_ioapic_map_size++;
+ early_ioapic_map[i].id = id;
+ early_ioapic_map[i].devid = devid;
+ early_ioapic_map[i].cmd_line = true;
+
+ return 1;
+}
+
+static int __init parse_ivrs_hpet(char *str)
+{
+ u32 seg = 0, bus, dev, fn;
+ int id, i;
+ u32 devid;
+
+ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
+ goto found;
+
+ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
+ pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
+ str, id, seg, bus, dev, fn);
+ goto found;
+ }
+
+ pr_err("Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+
+found:
+ if (early_hpet_map_size == EARLY_MAP_SIZE) {
+ pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
+ str);
+ return 1;
+ }
+
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
+
+ cmdline_maps = true;
+ i = early_hpet_map_size++;
+ early_hpet_map[i].id = id;
+ early_hpet_map[i].devid = devid;
+ early_hpet_map[i].cmd_line = true;
+
+ return 1;
+}
+
+#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
+
+static int __init parse_ivrs_acpihid(char *str)
+{
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+ char acpiid[ACPIID_LEN] = {0};
+ int i;
+
+ addr = strchr(str, '@');
+ if (!addr) {
+ addr = strchr(str, '=');
+ if (!addr)
+ goto not_found;
+
+ ++addr;
+
+ if (strlen(addr) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
+ sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
+ pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
+ str, acpiid, seg, bus, dev, fn);
+ goto found;
+ }
+ goto not_found;
+ }
+
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+ if (strlen(str) > ACPIID_LEN + 1)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+ goto not_found;
+
+ if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
+ sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
+ goto found;
+
+not_found:
+ pr_err("Invalid command line: ivrs_acpihid%s\n", str);
+ return 1;
+
+found:
+ p = acpiid;
+ hid = strsep(&p, ":");
+ uid = p;
+
+ if (!hid || !(*hid) || !uid) {
+ pr_err("Invalid command line: hid or uid\n");
+ return 1;
+ }
+
+ /*
+ * Ignore leading zeroes after ':', so e.g., AMDI0095:00
+ * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
+ */
+ while (*uid == '0' && *(uid + 1))
+ uid++;
+
+ i = early_acpihid_map_size++;
+ memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+ memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+ early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
+ early_acpihid_map[i].cmd_line = true;
+
+ return 1;
+}
+
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
+__setup("amd_iommu=", parse_amd_iommu_options);
+__setup("amd_iommu_intr=", parse_amd_iommu_intr);
+__setup("ivrs_ioapic", parse_ivrs_ioapic);
+__setup("ivrs_hpet", parse_ivrs_hpet);
+__setup("ivrs_acpihid", parse_ivrs_acpihid);
+
+bool amd_iommu_v2_supported(void)
+{
+ /* CPU page table size should match IOMMU guest page table size */
+ if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+ amd_iommu_gpt_level != PAGE_MODE_5_LEVEL)
+ return false;
+
+ /*
+ * Since DTE[Mode]=0 is prohibited on SNP-enabled systems
+ * (i.e. EFR[SNPSup]=1), the IOMMUv2 page table cannot be used
+ * without also setting up an IOMMUv1 page table.
+ */
+ return amd_iommu_v2_present && !amd_iommu_snp_en;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
+
+struct amd_iommu *get_amd_iommu(unsigned int idx)
+{
+ unsigned int i = 0;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ if (i++ == idx)
+ return iommu;
+ return NULL;
+}
+
+/****************************************************************************
+ *
+ * IOMMU EFR Performance Counter support. This code provides access to
+ * the IOMMU PC functionality.
+ *
+ ****************************************************************************/
+
+u8 amd_iommu_pc_get_max_banks(unsigned int idx)
+{
+ struct amd_iommu *iommu = get_amd_iommu(idx);
+
+ if (iommu)
+ return iommu->max_banks;
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
+
+bool amd_iommu_pc_supported(void)
+{
+ return amd_iommu_pc_present;
+}
+EXPORT_SYMBOL(amd_iommu_pc_supported);
+
+u8 amd_iommu_pc_get_max_counters(unsigned int idx)
+{
+ struct amd_iommu *iommu = get_amd_iommu(idx);
+
+ if (iommu)
+ return iommu->max_counters;
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value, bool is_write)
+{
+ u32 offset;
+ u32 max_offset_lim;
+
+ /* Make sure the IOMMU PC resource is available */
+ if (!amd_iommu_pc_present)
+ return -ENODEV;
+
+ /* Check for valid iommu and pc register indexing */
+ if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
+ return -ENODEV;
+
+ offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
+
+ /* Limit the offset to the hw defined mmio region aperture */
+ max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
+ (iommu->max_counters << 8) | 0x28);
+ if ((offset < MMIO_CNTR_REG_OFFSET) ||
+ (offset > max_offset_lim))
+ return -EINVAL;
+
+ if (is_write) {
+ u64 val = *value & GENMASK_ULL(47, 0);
+
+ writel((u32)val, iommu->mmio_base + offset);
+ writel((val >> 32), iommu->mmio_base + offset + 4);
+ } else {
+ *value = readl(iommu->mmio_base + offset + 4);
+ *value <<= 32;
+ *value |= readl(iommu->mmio_base + offset);
+ *value &= GENMASK_ULL(47, 0);
+ }
+
+ return 0;
+}
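
As a worked example of the offset encoding in iommu_pc_get_set_reg() (the bank, counter and limit values below are made up for illustration): selecting bank 1, counter 0 and the counter-register function 0x00 yields offset 0x41000, and an IOMMU reporting max_banks=2, max_counters=4 has an aperture limit of 0x42428:

        offset         = ((0x40 | 1) << 12) | (0 << 8) | 0x00;     /* 0x41000 */
        max_offset_lim = ((0x40 | 2) << 12) | (4 << 8) | 0x28;     /* 0x42428 */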
+
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
+{
+ if (!iommu)
+ return -EINVAL;
+
+ return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
+}
+
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
+{
+ if (!iommu)
+ return -EINVAL;
+
+ return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
+}
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void)
+{
+ /*
+ * SNP support requires that the IOMMU is enabled and is not
+ * configured in passthrough mode.
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
+ return -EINVAL;
+ }
+
+ /*
+ * Prevent enabling SNP after the IOMMU_ENABLED state because this
+ * affects how the IOMMU driver sets up data structures and
+ * configures the IOMMU hardware.
+ */
+ if (init_state > IOMMU_ENABLED) {
+ pr_err("SNP: Too late to enable SNP for IOMMU.\n");
+ return -EINVAL;
+ }
+
+ amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+ if (!amd_iommu_snp_en)
+ return -EINVAL;
+
+ pr_info("SNP enabled\n");
+
+ /* Enforce IOMMU v1 pagetable when SNP is enabled. */
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
new file mode 100644
index 0000000000..2892aa1b4d
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table allocator.
+ *
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+static void v1_tlb_flush_all(void *cookie)
+{
+}
+
+static void v1_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v1_flush_ops = {
+ .tlb_flush_all = v1_tlb_flush_all,
+ .tlb_flush_walk = v1_tlb_flush_walk,
+ .tlb_add_page = v1_tlb_add_page,
+};
+
+/*
+ * Helper function to get the first pte of a large mapping
+ */
+static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
+ unsigned long *count)
+{
+ unsigned long pte_mask, pg_size, cnt;
+ u64 *fpte;
+
+ pg_size = PTE_PAGE_SIZE(*pte);
+ cnt = PAGE_SIZE_PTE_COUNT(pg_size);
+ pte_mask = ~((cnt << 3) - 1);
+ fpte = (u64 *)(((unsigned long)pte) & pte_mask);
+
+ if (page_size)
+ *page_size = pg_size;
+
+ if (count)
+ *count = cnt;
+
+ return fpte;
+}
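
For example (a hedged illustration, assuming the PAGE_SIZE_PTE_COUNT() definition from amd_iommu_types.h): a 32 KiB large mapping is represented by eight replicated 4 KiB-level PTEs, so cnt is 8, pte_mask is ~0x3f, and any of the eight PTE pointers is rounded down to the 64-byte aligned first entry of the run:

        cnt      = 8;                  /* PAGE_SIZE_PTE_COUNT(SZ_32K)   */
        pte_mask = ~((cnt << 3) - 1);  /* == ~0x3f, 64-byte alignment   */
        /* pte == ...1238 (hypothetical)  ->  fpte == ...1200           */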
+
+/****************************************************************************
+ *
+ * The functions below are used to create the page table mappings for
+ * unity mapped regions.
+ *
+ ****************************************************************************/
+
+static void free_pt_page(u64 *pt, struct list_head *freelist)
+{
+ struct page *p = virt_to_page(pt);
+
+ list_add_tail(&p->lru, freelist);
+}
+
+static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
+{
+ u64 *p;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ /* PTE present? */
+ if (!IOMMU_PTE_PRESENT(pt[i]))
+ continue;
+
+ /* Large PTE? */
+ if (PM_PTE_LEVEL(pt[i]) == 0 ||
+ PM_PTE_LEVEL(pt[i]) == 7)
+ continue;
+
+ /*
+ * Free the next level. No need to look at l1 tables here since
+ * they can only contain leaf PTEs; just free them directly.
+ */
+ p = IOMMU_PTE_PAGE(pt[i]);
+ if (lvl > 2)
+ free_pt_lvl(p, freelist, lvl - 1);
+ else
+ free_pt_page(p, freelist);
+ }
+
+ free_pt_page(pt, freelist);
+}
+
+static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+{
+ switch (mode) {
+ case PAGE_MODE_NONE:
+ case PAGE_MODE_7_LEVEL:
+ break;
+ case PAGE_MODE_1_LEVEL:
+ free_pt_page(root, freelist);
+ break;
+ case PAGE_MODE_2_LEVEL:
+ case PAGE_MODE_3_LEVEL:
+ case PAGE_MODE_4_LEVEL:
+ case PAGE_MODE_5_LEVEL:
+ case PAGE_MODE_6_LEVEL:
+ free_pt_lvl(root, freelist, mode);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode)
+{
+ u64 pt_root;
+
+ /* lowest 3 bits encode pgtable mode */
+ pt_root = mode & 7;
+ pt_root |= (u64)root;
+
+ amd_iommu_domain_set_pt_root(domain, pt_root);
+}
+
+/*
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
+ */
+static bool increase_address_space(struct protection_domain *domain,
+ unsigned long address,
+ gfp_t gfp)
+{
+ unsigned long flags;
+ bool ret = true;
+ u64 *pte;
+
+ pte = alloc_pgtable_page(domain->nid, gfp);
+ if (!pte)
+ return false;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (address <= PM_LEVEL_SIZE(domain->iop.mode))
+ goto out;
+
+ ret = false;
+ if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
+ goto out;
+
+ *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+
+ domain->iop.root = pte;
+ domain->iop.mode += 1;
+ amd_iommu_update_and_flush_device_table(domain);
+ amd_iommu_domain_flush_complete(domain);
+
+ /*
+ * Device Table needs to be updated and flushed before the new root can
+ * be published.
+ */
+ amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);
+
+ pte = NULL;
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+ free_page((unsigned long)pte);
+
+ return ret;
+}
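
To put the "another level adds 9 bits" comment above in numbers: each level indexes 512 (2^9) entries on top of the 4 KiB (2^12) page offset, so the IOVA reach grows as follows until the full 64-bit space is covered:

        3-level: 12 + 3*9 = 39 bits  (512 GiB)
        4-level: 12 + 4*9 = 48 bits  (256 TiB)
        5-level: 12 + 5*9 = 57 bits  (128 PiB)
        6-level: 12 + 6*9 = 66 bits  (capped at the 64-bit IOVA space)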
+
+static u64 *alloc_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long page_size,
+ u64 **pte_page,
+ gfp_t gfp,
+ bool *updated)
+{
+ int level, end_lvl;
+ u64 *pte, *page;
+
+ BUG_ON(!is_power_of_2(page_size));
+
+ while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
+ /*
+ * Return an error if there is no memory to update the
+ * page-table.
+ */
+ if (!increase_address_space(domain, address, gfp))
+ return NULL;
+ }
+
+
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ address = PAGE_SIZE_ALIGN(address, page_size);
+ end_lvl = PAGE_SIZE_LEVEL(page_size);
+
+ while (level > end_lvl) {
+ u64 __pte, __npte;
+ int pte_level;
+
+ __pte = *pte;
+ pte_level = PM_PTE_LEVEL(__pte);
+
+ /*
+ * If we replace a series of large PTEs, we need
+ * to tear down all of them.
+ */
+ if (IOMMU_PTE_PRESENT(__pte) &&
+ pte_level == PAGE_MODE_7_LEVEL) {
+ unsigned long count, i;
+ u64 *lpte;
+
+ lpte = first_pte_l7(pte, NULL, &count);
+
+ /*
+ * Unmap the replicated PTEs that still match the
+ * original large mapping
+ */
+ for (i = 0; i < count; ++i)
+ cmpxchg64(&lpte[i], __pte, 0ULL);
+
+ *updated = true;
+ continue;
+ }
+
+ if (!IOMMU_PTE_PRESENT(__pte) ||
+ pte_level == PAGE_MODE_NONE) {
+ page = alloc_pgtable_page(domain->nid, gfp);
+
+ if (!page)
+ return NULL;
+
+ __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
+
+ /* pte could have been changed somewhere. */
+ if (!try_cmpxchg64(pte, &__pte, __npte))
+ free_page((unsigned long)page);
+ else if (IOMMU_PTE_PRESENT(__pte))
+ *updated = true;
+
+ continue;
+ }
+
+ /* No level skipping support yet */
+ if (pte_level != level)
+ return NULL;
+
+ level -= 1;
+
+ pte = IOMMU_PTE_PAGE(__pte);
+
+ if (pte_page && level == end_lvl)
+ *pte_page = pte;
+
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ }
+
+ return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ unsigned long address,
+ unsigned long *page_size)
+{
+ int level;
+ u64 *pte;
+
+ *page_size = 0;
+
+ if (address > PM_LEVEL_SIZE(pgtable->mode))
+ return NULL;
+
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+
+ while (level > 0) {
+
+ /* Not Present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Large PTE */
+ if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
+ PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
+ break;
+
+ /* No level skipping support yet */
+ if (PM_PTE_LEVEL(*pte) != level)
+ return NULL;
+
+ level -= 1;
+
+ /* Walk to the next level */
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+ }
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
+ pte = first_pte_l7(pte, page_size, NULL);
+
+ return pte;
+}
+
+static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
+{
+ u64 *pt;
+ int mode;
+
+ while (!try_cmpxchg64(pte, &pteval, 0))
+ pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
+
+ if (!IOMMU_PTE_PRESENT(pteval))
+ return;
+
+ pt = IOMMU_PTE_PAGE(pteval);
+ mode = IOMMU_PTE_MODE(pteval);
+
+ free_sub_pt(pt, mode, freelist);
+}
+
+/*
+ * Generic mapping function. It maps a physical address into a DMA
+ * address space and allocates the page table pages if necessary.
+ * In the future it can be extended into a generic mapping function
+ * supporting all features of AMD IOMMU page tables, like level skipping
+ * and full 64-bit address spaces.
+ */
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
+ LIST_HEAD(freelist);
+ bool updated = false;
+ u64 __pte, *pte;
+ int ret, i, count;
+
+ BUG_ON(!IS_ALIGNED(iova, pgsize));
+ BUG_ON(!IS_ALIGNED(paddr, pgsize));
+
+ ret = -EINVAL;
+ if (!(prot & IOMMU_PROT_MASK))
+ goto out;
+
+ while (pgcount > 0) {
+ count = PAGE_SIZE_PTE_COUNT(pgsize);
+ pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
+
+ ret = -ENOMEM;
+ if (!pte)
+ goto out;
+
+ for (i = 0; i < count; ++i)
+ free_clear_pte(&pte[i], pte[i], &freelist);
+
+ if (!list_empty(&freelist))
+ updated = true;
+
+ if (count > 1) {
+ __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ } else
+ __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
+
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ iova += pgsize;
+ paddr += pgsize;
+ pgcount--;
+ if (mapped)
+ *mapped += pgsize;
+ }
+
+ ret = 0;
+
+out:
+ if (updated) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dom->lock, flags);
+ /*
+ * Flush domain TLB(s) and wait for completion. Any Device-Table
+ * Updates and flushing already happened in
+ * increase_address_space().
+ */
+ amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_complete(dom);
+ spin_unlock_irqrestore(&dom->lock, flags);
+ }
+
+ /* Everything flushed out, free pages now */
+ put_pages_list(&freelist);
+
+ return ret;
+}
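
For reference, a caller holding this table's io_pgtable_ops would install a single 4 KiB read/write mapping roughly as follows; this is a sketch only, and ops, iova and paddr are assumed to come from the surrounding domain code, which is not shown here:

        size_t mapped = 0;
        int ret;

        ret = ops->map_pages(ops, iova, paddr, SZ_4K, 1,
                             IOMMU_PROT_IR | IOMMU_PROT_IW,
                             GFP_KERNEL, &mapped);
        /* on success ret is 0 and mapped has been advanced by SZ_4K */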
+
+static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long long unmapped;
+ unsigned long unmap_size;
+ u64 *pte;
+ size_t size = pgcount << __ffs(pgsize);
+
+ BUG_ON(!is_power_of_2(pgsize));
+
+ unmapped = 0;
+
+ while (unmapped < size) {
+ pte = fetch_pte(pgtable, iova, &unmap_size);
+ if (pte) {
+ int i, count;
+
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+ } else {
+ return unmapped;
+ }
+
+ iova = (iova & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ return unmapped;
+}
+
+static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long offset_mask, pte_pgsize;
+ u64 *pte, __pte;
+
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
+
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+}
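
A short worked example of the translation above (all values hypothetical): for a 2 MiB mapping, fetch_pte() reports pte_pgsize == 0x200000, so offset_mask == 0x1fffff and the low 21 bits of the IOVA are carried over into the returned physical address:

        /* frame 0x80000000, iova 0x40123456, 2 MiB page       */
        /* result: 0x80000000 | (0x40123456 & 0x1fffff)        */
        /*       = 0x80000000 | 0x123456   = 0x80123456        */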
+
+/*
+ * ----------------------------------------------------
+ */
+static void v1_free_pgtable(struct io_pgtable *iop)
+{
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+ struct protection_domain *dom;
+ LIST_HEAD(freelist);
+
+ if (pgtable->mode == PAGE_MODE_NONE)
+ return;
+
+ dom = container_of(pgtable, struct protection_domain, iop);
+
+ /* Page-table is not visible to IOMMU anymore, so free it */
+ BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+ pgtable->mode > PAGE_MODE_6_LEVEL);
+
+ free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+
+ /* Update data structure */
+ amd_iommu_domain_clr_pt_root(dom);
+
+ /* Make changes visible to IOMMUs */
+ amd_iommu_domain_update(dom);
+
+ put_pages_list(&freelist);
+}
+
+static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+
+ cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES,
+ cfg->ias = IOMMU_IN_ADDR_BIT_SIZE,
+ cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
+ cfg->tlb = &v1_flush_ops;
+
+ pgtable->iop.ops.map_pages = iommu_v1_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
+ pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
+
+ return &pgtable->iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
+ .alloc = v1_alloc_pgtable,
+ .free = v1_free_pgtable,
+};
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
new file mode 100644
index 0000000000..e9ef2e0a62
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table v2 allocator.
+ *
+ * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ * Author: Vasant Hegde <vasant.hegde@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+#define IOMMU_PAGE_PRESENT BIT_ULL(0) /* Is present */
+#define IOMMU_PAGE_RW BIT_ULL(1) /* Writeable */
+#define IOMMU_PAGE_USER BIT_ULL(2) /* Userspace addressable */
+#define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
+#define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
+#define IOMMU_PAGE_ACCESS BIT_ULL(5) /* Was accessed (updated by IOMMU) */
+#define IOMMU_PAGE_DIRTY BIT_ULL(6) /* Was written to (updated by IOMMU) */
+#define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
+#define IOMMU_PAGE_NX BIT_ULL(63) /* No execute */
+
+#define MAX_PTRS_PER_PAGE 512
+
+#define IOMMU_PAGE_SIZE_2M BIT_ULL(21)
+#define IOMMU_PAGE_SIZE_1G BIT_ULL(30)
+
+
+static inline int get_pgtable_level(void)
+{
+ return amd_iommu_gpt_level;
+}
+
+static inline bool is_large_pte(u64 pte)
+{
+ return (pte & IOMMU_PAGE_PSE);
+}
+
+static inline u64 set_pgtable_attr(u64 *page)
+{
+ u64 prot;
+
+ prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
+ prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ return (iommu_virt_to_phys(page) | prot);
+}
+
+static inline void *get_pgtable_pte(u64 pte)
+{
+ return iommu_phys_to_virt(pte & PM_ADDR_MASK);
+}
+
+static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
+{
+ u64 pte;
+
+ pte = __sme_set(paddr & PM_ADDR_MASK);
+ pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
+ pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ if (prot & IOMMU_PROT_IW)
+ pte |= IOMMU_PAGE_RW;
+
+ /* Large page */
+ if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
+ pte |= IOMMU_PAGE_PSE;
+
+ return pte;
+}
+
+static inline u64 get_alloc_page_size(u64 size)
+{
+ if (size >= IOMMU_PAGE_SIZE_1G)
+ return IOMMU_PAGE_SIZE_1G;
+
+ if (size >= IOMMU_PAGE_SIZE_2M)
+ return IOMMU_PAGE_SIZE_2M;
+
+ return PAGE_SIZE;
+}
+
+static inline int page_size_to_level(u64 pg_size)
+{
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ return PAGE_MODE_3_LEVEL;
+ if (pg_size == IOMMU_PAGE_SIZE_2M)
+ return PAGE_MODE_2_LEVEL;
+
+ return PAGE_MODE_1_LEVEL;
+}
+
+static inline void free_pgtable_page(u64 *pt)
+{
+ free_page((unsigned long)pt);
+}
+
+static void free_pgtable(u64 *pt, int level)
+{
+ u64 *p;
+ int i;
+
+ for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
+ /* PTE present? */
+ if (!IOMMU_PTE_PRESENT(pt[i]))
+ continue;
+
+ if (is_large_pte(pt[i]))
+ continue;
+
+ /*
+ * Free the next level. No need to look at l1 tables here since
+ * they can only contain leaf PTEs; just free them directly.
+ */
+ p = get_pgtable_pte(pt[i]);
+ if (level > 2)
+ free_pgtable(p, level - 1);
+ else
+ free_pgtable_page(p);
+ }
+
+ free_pgtable_page(pt);
+}
+
+/* Allocate page table */
+static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
+ unsigned long pg_size, gfp_t gfp, bool *updated)
+{
+ u64 *pte, *page;
+ int level, end_level;
+
+ level = get_pgtable_level() - 1;
+ end_level = page_size_to_level(pg_size);
+ pte = &pgd[PM_LEVEL_INDEX(level, iova)];
+ iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
+
+ while (level >= end_level) {
+ u64 __pte, __npte;
+
+ __pte = *pte;
+
+ if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
+ /* Unmap large pte */
+ cmpxchg64(pte, *pte, 0ULL);
+ *updated = true;
+ continue;
+ }
+
+ if (!IOMMU_PTE_PRESENT(__pte)) {
+ page = alloc_pgtable_page(nid, gfp);
+ if (!page)
+ return NULL;
+
+ __npte = set_pgtable_attr(page);
+ /* pte could have been changed somewhere. */
+ if (cmpxchg64(pte, __pte, __npte) != __pte)
+ free_pgtable_page(page);
+ else if (IOMMU_PTE_PRESENT(__pte))
+ *updated = true;
+
+ continue;
+ }
+
+ level -= 1;
+ pte = get_pgtable_pte(__pte);
+ pte = &pte[PM_LEVEL_INDEX(level, iova)];
+ }
+
+ /* Tear down existing pte entries */
+ if (IOMMU_PTE_PRESENT(*pte)) {
+ u64 *__pte;
+
+ *updated = true;
+ __pte = get_pgtable_pte(*pte);
+ cmpxchg64(pte, *pte, 0ULL);
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ free_pgtable(__pte, end_level - 1);
+ else if (pg_size == IOMMU_PAGE_SIZE_2M)
+ free_pgtable_page(__pte);
+ }
+
+ return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address.
+ * If there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ unsigned long iova, unsigned long *page_size)
+{
+ u64 *pte;
+ int level;
+
+ level = get_pgtable_level() - 1;
+ pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
+ /* Default page size is 4K */
+ *page_size = PAGE_SIZE;
+
+ while (level) {
+ /* Not present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Walk to the next level */
+ pte = get_pgtable_pte(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
+
+ /* Large page */
+ if (is_large_pte(*pte)) {
+ if (level == PAGE_MODE_3_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_1G;
+ else if (level == PAGE_MODE_2_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_2M;
+ else
+ return NULL; /* Wrongly set PSE bit in PTE */
+
+ break;
+ }
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
+ struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
+ u64 *pte;
+ unsigned long map_size;
+ unsigned long mapped_size = 0;
+ unsigned long o_iova = iova;
+ size_t size = pgcount << __ffs(pgsize);
+ int count = 0;
+ int ret = 0;
+ bool updated = false;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
+ return -EINVAL;
+
+ if (!(prot & IOMMU_PROT_MASK))
+ return -EINVAL;
+
+ while (mapped_size < size) {
+ map_size = get_alloc_page_size(pgsize);
+ pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
+ iova, map_size, gfp, &updated);
+ if (!pte) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *pte = set_pte_attr(paddr, map_size, prot);
+
+ count++;
+ iova += map_size;
+ paddr += map_size;
+ mapped_size += map_size;
+ }
+
+out:
+ if (updated) {
+ if (count > 1)
+ amd_iommu_flush_tlb(&pdom->domain, 0);
+ else
+ amd_iommu_flush_page(&pdom->domain, 0, o_iova);
+ }
+
+ if (mapped)
+ *mapped += mapped_size;
+
+ return ret;
+}
+
+static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
+ unsigned long unmap_size;
+ unsigned long unmapped = 0;
+ size_t size = pgcount << __ffs(pgsize);
+ u64 *pte;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
+ return 0;
+
+ while (unmapped < size) {
+ pte = fetch_pte(pgtable, iova, &unmap_size);
+ if (!pte)
+ return unmapped;
+
+ *pte = 0ULL;
+
+ iova = (iova & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ return unmapped;
+}
+
+static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long offset_mask, pte_pgsize;
+ u64 *pte, __pte;
+
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
+/*
+ * ----------------------------------------------------
+ */
+static void v2_tlb_flush_all(void *cookie)
+{
+}
+
+static void v2_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v2_flush_ops = {
+ .tlb_flush_all = v2_tlb_flush_all,
+ .tlb_flush_walk = v2_tlb_flush_walk,
+ .tlb_add_page = v2_tlb_add_page,
+};
+
+static void v2_free_pgtable(struct io_pgtable *iop)
+{
+ struct protection_domain *pdom;
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+
+ pdom = container_of(pgtable, struct protection_domain, iop);
+ if (!(pdom->flags & PD_IOMMUV2_MASK))
+ return;
+
+ /*
+ * Make changes visible to IOMMUs. No need to clear the gcr3 entry
+ * as the gcr3 table has already been freed.
+ */
+ amd_iommu_domain_update(pdom);
+
+ /* Free page table */
+ free_pgtable(pgtable->pgd, get_pgtable_level());
+}
+
+static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+ struct protection_domain *pdom = (struct protection_domain *)cookie;
+ int ret;
+ int ias = IOMMU_IN_ADDR_BIT_SIZE;
+
+ pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
+ if (!pgtable->pgd)
+ return NULL;
+
+ ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
+ if (ret)
+ goto err_free_pgd;
+
+ if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
+ ias = 57;
+
+ pgtable->iop.ops.map_pages = iommu_v2_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
+ pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
+
+ cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
+ cfg->ias = ias,
+ cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
+ cfg->tlb = &v2_flush_ops;
+
+ return &pgtable->iop;
+
+err_free_pgd:
+ free_pgtable_page(pgtable->pgd);
+
+ return NULL;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
+ .alloc = v2_alloc_pgtable,
+ .free = v2_free_pgtable,
+};
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
new file mode 100644
index 0000000000..95bd7c25ba
--- /dev/null
+++ b/drivers/iommu/amd/iommu.c
@@ -0,0 +1,3783 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ * Leo Duran <leo.duran@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/ratelimit.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/pci-ats.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-map-ops.h>
+#include <linux/dma-direct.h>
+#include <linux/iommu-helper.h>
+#include <linux/delay.h>
+#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
+#include <linux/percpu.h>
+#include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
+#include <asm/irq_remapping.h>
+#include <asm/io_apic.h>
+#include <asm/apic.h>
+#include <asm/hw_irq.h>
+#include <asm/proto.h>
+#include <asm/iommu.h>
+#include <asm/gart.h>
+#include <asm/dma.h>
+
+#include "amd_iommu.h"
+#include "../dma-iommu.h"
+#include "../irq_remapping.h"
+
+#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
+
+#define LOOP_TIMEOUT 100000
+
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
+
+/* Reserved IOVA ranges */
+#define MSI_RANGE_START (0xfee00000)
+#define MSI_RANGE_END (0xfeefffff)
+#define HT_RANGE_START (0xfd00000000ULL)
+#define HT_RANGE_END (0xffffffffffULL)
+
+#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
+
+static DEFINE_SPINLOCK(pd_bitmap_lock);
+
+LIST_HEAD(ioapic_map);
+LIST_HEAD(hpet_map);
+LIST_HEAD(acpihid_map);
+
+const struct iommu_ops amd_iommu_ops;
+
+static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
+
+/*
+ * General struct to manage commands sent to an IOMMU.
+ */
+struct iommu_cmd {
+ u32 data[4];
+};
+
+struct kmem_cache *amd_iommu_irq_cache;
+
+static void detach_device(struct device *dev);
+static int domain_enable_v2(struct protection_domain *domain, int pasids);
+
+/****************************************************************************
+ *
+ * Helper functions
+ *
+ ****************************************************************************/
+
+static inline int get_acpihid_device_id(struct device *dev,
+ struct acpihid_map_entry **entry)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpihid_map_entry *p;
+
+ if (!adev)
+ return -ENODEV;
+
+ list_for_each_entry(p, &acpihid_map, list) {
+ if (acpi_dev_hid_uid_match(adev, p->hid,
+ p->uid[0] ? p->uid : NULL)) {
+ if (entry)
+ *entry = p;
+ return p->devid;
+ }
+ }
+ return -EINVAL;
+}
+
+static inline int get_device_sbdf_id(struct device *dev)
+{
+ int sbdf;
+
+ if (dev_is_pci(dev))
+ sbdf = get_pci_sbdf_id(to_pci_dev(dev));
+ else
+ sbdf = get_acpihid_device_id(dev, NULL);
+
+ return sbdf;
+}
+
+struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
+{
+ struct dev_table_entry *dev_table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ BUG_ON(pci_seg == NULL);
+ dev_table = pci_seg->dev_table;
+ BUG_ON(dev_table == NULL);
+
+ return dev_table;
+}
+
+static inline u16 get_device_segment(struct device *dev)
+{
+ u16 seg;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ seg = pci_domain_nr(pdev->bus);
+ } else {
+ u32 devid = get_acpihid_device_id(dev, NULL);
+
+ seg = PCI_SBDF_TO_SEGID(devid);
+ }
+
+ return seg;
+}
+
+/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->rlookup_table[devid] = iommu;
+}
+
+static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == seg)
+ return pci_seg->rlookup_table[devid];
+ }
+ return NULL;
+}
+
+static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
+{
+ u16 seg = get_device_segment(dev);
+ int devid = get_device_sbdf_id(dev);
+
+ if (devid < 0)
+ return NULL;
+ return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
+}
+
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct protection_domain, domain);
+}
+
+static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return NULL;
+
+ spin_lock_init(&dev_data->lock);
+ dev_data->devid = devid;
+ ratelimit_default_init(&dev_data->rs);
+
+ llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
+ return dev_data;
+}
+
+static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
+{
+ struct iommu_dev_data *dev_data;
+ struct llist_node *node;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ if (llist_empty(&pci_seg->dev_data_list))
+ return NULL;
+
+ node = pci_seg->dev_data_list.first;
+ llist_for_each_entry(dev_data, node, dev_data_list) {
+ if (dev_data->devid == devid)
+ return dev_data;
+ }
+
+ return NULL;
+}
+
+static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct amd_iommu *iommu;
+ struct dev_table_entry *dev_table;
+ u16 devid = pci_dev_id(pdev);
+
+ if (devid == alias)
+ return 0;
+
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return 0;
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+ dev_table = get_dev_table(iommu);
+ memcpy(dev_table[alias].data,
+ dev_table[devid].data,
+ sizeof(dev_table[alias].data));
+
+ return 0;
+}
+
+static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
+ return;
+ pdev = to_pci_dev(dev);
+
+ /*
+ * The IVRS alias stored in the alias table may not be
+ * part of the PCI DMA aliases if its bus differs from
+ * that of the original device.
+ */
+ clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
+
+ pci_for_each_dma_alias(pdev, clone_alias, NULL);
+}
+
+static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ u16 ivrs_alias;
+
+ /* For ACPI HID devices, there are no aliases */
+ if (!dev_is_pci(dev))
+ return;
+
+ /*
+ * Add the IVRS alias to the pci aliases if it is on the same
+ * bus. The IVRS table may know about a quirk that we don't.
+ */
+ ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
+ if (ivrs_alias != pci_dev_id(pdev) &&
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
+ pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
+
+ clone_aliases(iommu, dev);
+}
+
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = search_dev_data(iommu, devid);
+
+ if (dev_data == NULL) {
+ dev_data = alloc_dev_data(iommu, devid);
+ if (!dev_data)
+ return NULL;
+
+ if (translation_pre_enabled(iommu))
+ dev_data->defer_attach = true;
+ }
+
+ return dev_data;
+}
+
+/*
+ * Find or create an IOMMU group for an acpihid device.
+ */
+static struct iommu_group *acpihid_device_group(struct device *dev)
+{
+ struct acpihid_map_entry *p, *entry = NULL;
+ int devid;
+
+ devid = get_acpihid_device_id(dev, &entry);
+ if (devid < 0)
+ return ERR_PTR(devid);
+
+ list_for_each_entry(p, &acpihid_map, list) {
+ if ((devid == p->devid) && p->group)
+ entry->group = p->group;
+ }
+
+ if (!entry->group)
+ entry->group = generic_device_group(dev);
+ else
+ iommu_group_ref_get(entry->group);
+
+ return entry->group;
+}
+
+static bool pci_iommuv2_capable(struct pci_dev *pdev)
+{
+ static const int caps[] = {
+ PCI_EXT_CAP_ID_PRI,
+ PCI_EXT_CAP_ID_PASID,
+ };
+ int i, pos;
+
+ if (!pci_ats_supported(pdev))
+ return false;
+
+ for (i = 0; i < 2; ++i) {
+ pos = pci_find_ext_capability(pdev, caps[i]);
+ if (pos == 0)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * This function checks if the driver got a valid device from the caller to
+ * avoid dereferencing invalid pointers.
+ */
+static bool check_device(struct device *dev)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu;
+ int devid, sbdf;
+
+ if (!dev)
+ return false;
+
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return false;
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return false;
+
+ /* Out of our scope? */
+ pci_seg = iommu->pci_seg;
+ if (devid > pci_seg->last_bdf)
+ return false;
+
+ return true;
+}
+
+static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+ int devid, sbdf;
+
+ if (dev_iommu_priv_get(dev))
+ return 0;
+
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return sbdf;
+
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ dev_data = find_dev_data(iommu, devid);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->dev = dev;
+ setup_aliases(iommu, dev);
+
+ /*
+ * By default we use passthrough mode for IOMMUv2 capable devices.
+ * But if amd_iommu=force_isolation is set (e.g. to debug DMA to an
+ * invalid address), we ignore the capability for the device so that
+ * it is forced into translation mode.
+ */
+ if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
+ dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
+ dev_data->iommu_v2 = iommu->is_iommu_v2;
+ }
+
+ dev_iommu_priv_set(dev, dev_data);
+
+ return 0;
+}
+
+static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ int devid, sbdf;
+
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return;
+
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ pci_seg->rlookup_table[devid] = NULL;
+ memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
+
+ setup_aliases(iommu, dev);
+}
+
+static void amd_iommu_uninit_device(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = dev_iommu_priv_get(dev);
+ if (!dev_data)
+ return;
+
+ if (dev_data->domain)
+ detach_device(dev);
+
+ dev_iommu_priv_set(dev, NULL);
+
+ /*
+ * We keep dev_data around for unplugged devices and reuse it when the
+ * device is re-plugged - not doing so would introduce a ton of races.
+ */
+}
+
+/****************************************************************************
+ *
+ * Interrupt handling functions
+ *
+ ****************************************************************************/
+
+static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
+{
+ int i;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ for (i = 0; i < 4; ++i)
+ pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
+}
+
+static void dump_command(unsigned long phys_addr)
+{
+ struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
+}
+
+static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
+{
+ struct iommu_dev_data *dev_data = NULL;
+ int devid, vmg_tag, flags;
+ struct pci_dev *pdev;
+ u64 spa;
+
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ vmg_tag = (event[1]) & 0xFFFF;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
+
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (pdev)
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data) {
+ if (__ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ vmg_tag, spa, flags);
+ }
+ } else {
+ pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ vmg_tag, spa, flags);
+ }
+
+ if (pdev)
+ pci_dev_put(pdev);
+}
+
+static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
+{
+ struct iommu_dev_data *dev_data = NULL;
+ int devid, flags_rmp, vmg_tag, flags;
+ struct pci_dev *pdev;
+ u64 gpa;
+
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
+ vmg_tag = (event[1]) & 0xFFFF;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ gpa = ((u64)event[3] << 32) | event[2];
+
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (pdev)
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data) {
+ if (__ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ vmg_tag, gpa, flags_rmp, flags);
+ }
+ } else {
+ pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ vmg_tag, gpa, flags_rmp, flags);
+ }
+
+ if (pdev)
+ pci_dev_put(pdev);
+}
+
+#define IS_IOMMU_MEM_TRANSACTION(flags) \
+ (((flags) & EVENT_FLAG_I) == 0)
+
+#define IS_WRITE_REQUEST(flags) \
+ ((flags) & EVENT_FLAG_RW)
+
+static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
+ u16 devid, u16 domain_id,
+ u64 address, int flags)
+{
+ struct iommu_dev_data *dev_data = NULL;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (pdev)
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data) {
+ /*
+ * If this is a DMA fault (for which the I(nterrupt)
+ * bit will be unset), allow report_iommu_fault() to
+ * prevent logging it.
+ */
+ if (IS_IOMMU_MEM_TRANSACTION(flags)) {
+ /* Device not attached to domain properly */
+ if (dev_data->domain == NULL) {
+ pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
+ pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid), domain_id);
+ goto out;
+ }
+
+ if (!report_iommu_fault(&dev_data->domain->domain,
+ &pdev->dev, address,
+ IS_WRITE_REQUEST(flags) ?
+ IOMMU_FAULT_WRITE :
+ IOMMU_FAULT_READ))
+ goto out;
+ }
+
+ if (__ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ domain_id, address, flags);
+ }
+ } else {
+ pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ domain_id, address, flags);
+ }
+
+out:
+ if (pdev)
+ pci_dev_put(pdev);
+}
+
+static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
+{
+ struct device *dev = iommu->iommu.dev;
+ int type, devid, flags, tag;
+ volatile u32 *event = __evt;
+ int count = 0;
+ u64 address;
+ u32 pasid;
+
+retry:
+ type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ pasid = (event[0] & EVENT_DOMID_MASK_HI) |
+ (event[1] & EVENT_DOMID_MASK_LO);
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ address = (u64)(((u64)event[3]) << 32) | event[2];
+
+ if (type == 0) {
+ /* Did we hit the erratum? */
+ if (++count == LOOP_TIMEOUT) {
+ pr_err("No event written to event log\n");
+ return;
+ }
+ udelay(1);
+ goto retry;
+ }
+
+ if (type == EVENT_TYPE_IO_FAULT) {
+ amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
+ return;
+ }
+
+ switch (type) {
+ case EVENT_TYPE_ILL_DEV:
+ dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pasid, address, flags);
+ dump_dte_entry(iommu, devid);
+ break;
+ case EVENT_TYPE_DEV_TAB_ERR:
+ dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
+ "address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
+ break;
+ case EVENT_TYPE_PAGE_TAB_ERR:
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pasid, address, flags);
+ break;
+ case EVENT_TYPE_ILL_CMD:
+ dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
+ dump_command(address);
+ break;
+ case EVENT_TYPE_CMD_HARD_ERR:
+ dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
+ address, flags);
+ break;
+ case EVENT_TYPE_IOTLB_INV_TO:
+ dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address);
+ break;
+ case EVENT_TYPE_INV_DEV_REQ:
+ dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pasid, address, flags);
+ break;
+ case EVENT_TYPE_RMP_FAULT:
+ amd_iommu_report_rmp_fault(iommu, event);
+ break;
+ case EVENT_TYPE_RMP_HW_ERR:
+ amd_iommu_report_rmp_hw_error(iommu, event);
+ break;
+ case EVENT_TYPE_INV_PPR_REQ:
+ pasid = PPR_PASID(*((u64 *)__evt));
+ tag = event[1] & 0x03FF;
+ dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pasid, address, flags, tag);
+ break;
+ default:
+ dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
+ event[0], event[1], event[2], event[3]);
+ }
+
+ /*
+ * To detect hardware erratum 732 we need to clear the
+ * entry back to zero. This issue does not exist on
+ * SNP-enabled systems. Also, this buffer is not writable
+ * on SNP-enabled systems.
+ */
+ if (!amd_iommu_snp_en)
+ memset(__evt, 0, 4 * sizeof(u32));
+}
+
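+/*
+ * Drain the event log ring buffer: print every entry between the current
+ * head and tail pointers and write the new head back to the hardware.
+ */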
+static void iommu_poll_events(struct amd_iommu *iommu)
+{
+ u32 head, tail;
+
+ head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+ while (head != tail) {
+ iommu_print_event(iommu, iommu->evt_buf + head);
+ head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
+ }
+
+ writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+}
+
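+/*
+ * Translate one raw PPR log entry into a struct amd_iommu_fault and hand
+ * it to the PPR notifier chain (registered via
+ * amd_iommu_register_ppr_notifier()).
+ */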
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
+{
+ struct amd_iommu_fault fault;
+
+ if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+ pr_err_ratelimited("Unknown PPR request received\n");
+ return;
+ }
+
+ fault.address = raw[1];
+ fault.pasid = PPR_PASID(raw[0]);
+ fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
+ fault.tag = PPR_TAG(raw[0]);
+ fault.flags = PPR_FLAGS(raw[0]);
+
+ atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
+}
+
+static void iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+ u32 head, tail;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ while (head != tail) {
+ volatile u64 *raw;
+ u64 entry[2];
+ int i;
+
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: The interrupt may arrive before the entry is
+ * written to memory. If this happens, we need to wait for the
+ * entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* Avoid memcpy function-call overhead */
+ entry[0] = raw[0];
+ entry[1] = raw[1];
+
+ /*
+ * To detect hardware erratum 733 we need to clear the
+ * entry back to zero. This issue does not exist on
+ * SNP-enabled systems. Also, this buffer is not writable
+ * on SNP-enabled systems.
+ */
+ if (!amd_iommu_snp_en)
+ raw[0] = raw[1] = 0UL;
+
+ /* Update head pointer of hardware ring-buffer */
+ head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+ writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, entry);
+
+ /* Refresh ring-buffer information */
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+ }
+}
+
+#ifdef CONFIG_IRQ_REMAP
+static int (*iommu_ga_log_notifier)(u32);
+
+int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+{
+ iommu_ga_log_notifier = notifier;
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
+
+static void iommu_poll_ga_log(struct amd_iommu *iommu)
+{
+ u32 head, tail;
+
+ if (iommu->ga_log == NULL)
+ return;
+
+ head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+ while (head != tail) {
+ volatile u64 *raw;
+ u64 log_entry;
+
+ raw = (u64 *)(iommu->ga_log + head);
+
+ /* Avoid memcpy function-call overhead */
+ log_entry = *raw;
+
+ /* Update head pointer of hardware ring-buffer */
+ head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
+ writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+
+ /* Handle GA entry */
+ switch (GA_REQ_TYPE(log_entry)) {
+ case GA_GUEST_NR:
+ if (!iommu_ga_log_notifier)
+ break;
+
+ pr_debug("%s: devid=%#x, ga_tag=%#x\n",
+ __func__, GA_DEVID(log_entry),
+ GA_TAG(log_entry));
+
+ if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
+ pr_err("GA log notifier failed.\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
+{
+ if (!irq_remapping_enabled || !dev_is_pci(dev) ||
+ !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
+ return;
+
+ dev_set_msi_domain(dev, iommu->ir_domain);
+}
+
+#else /* CONFIG_IRQ_REMAP */
+static inline void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
+#endif /* !CONFIG_IRQ_REMAP */
+
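+/*
+ * Common helper for the threaded log interrupts: keep polling the given
+ * log as long as its interrupt/overflow status bits are set, clearing the
+ * handled bits on each pass (they are write-1-to-clear, see the ERBT1312
+ * note below) and calling the overflow handler when the log overflowed.
+ */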
+static void amd_iommu_handle_irq(void *data, const char *evt_type,
+ u32 int_mask, u32 overflow_mask,
+ void (*int_handler)(struct amd_iommu *),
+ void (*overflow_handler)(struct amd_iommu *))
+{
+ struct amd_iommu *iommu = (struct amd_iommu *) data;
+ u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ u32 mask = int_mask | overflow_mask;
+
+ while (status & mask) {
+ /* Enable interrupt sources again */
+ writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ if (int_handler) {
+ pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
+ iommu->index, evt_type);
+ int_handler(iommu);
+ }
+
+ if ((status & overflow_mask) && overflow_handler)
+ overflow_handler(iommu);
+
+ /*
+ * Hardware bug: ERBT1312
+ * When re-enabling an interrupt (by writing 1
+ * to clear the bit), the hardware might also try to set
+ * the interrupt bit in the event status register.
+ * In this scenario, the bit will remain set and will
+ * disable subsequent interrupts.
+ *
+ * Workaround: The IOMMU driver should read back the
+ * status register and check if the interrupt bits are cleared.
+ * If not, the driver must go through the interrupt handler
+ * again and re-clear the bits.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ }
+}
+
+irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
+{
+ amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
+ MMIO_STATUS_EVT_OVERFLOW_MASK,
+ iommu_poll_events, amd_iommu_restart_event_logging);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
+{
+ amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
+ MMIO_STATUS_PPR_OVERFLOW_MASK,
+ iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
+{
+#ifdef CONFIG_IRQ_REMAP
+ amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
+ MMIO_STATUS_GALOG_OVERFLOW_MASK,
+ iommu_poll_ga_log, amd_iommu_restart_ga_log);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread(int irq, void *data)
+{
+ amd_iommu_int_thread_evtlog(irq, data);
+ amd_iommu_int_thread_pprlog(irq, data);
+ amd_iommu_int_thread_galog(irq, data);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_handler(int irq, void *data)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/****************************************************************************
+ *
+ * IOMMU command queuing functions
+ *
+ ****************************************************************************/
+
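+/* Busy-wait until the completion-wait semaphore reaches the expected value. */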
+static int wait_on_sem(struct amd_iommu *iommu, u64 data)
+{
+ int i = 0;
+
+ while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
+ udelay(1);
+ i += 1;
+ }
+
+ if (i == LOOP_TIMEOUT) {
+ pr_alert("Completion-Wait loop timed out\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
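+/*
+ * Copy a command into the ring buffer at the current tail and tell the
+ * hardware about it by updating the MMIO tail pointer.
+ */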
+static void copy_cmd_to_buffer(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd)
+{
+ u8 *target;
+ u32 tail;
+
+ /* Copy command to buffer */
+ tail = iommu->cmd_buf_tail;
+ target = iommu->cmd_buf + tail;
+ memcpy(target, cmd, sizeof(*cmd));
+
+ tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+ iommu->cmd_buf_tail = tail;
+
+ /* Tell the IOMMU about it */
+ writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+}
+
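+/*
+ * COMPLETION_WAIT with the store bit set: once the IOMMU has processed all
+ * prior commands it writes @data to the cmd_sem location that is polled by
+ * wait_on_sem().
+ */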
+static void build_completion_wait(struct iommu_cmd *cmd,
+ struct amd_iommu *iommu,
+ u64 data)
+{
+ u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+ cmd->data[1] = upper_32_bits(paddr);
+ cmd->data[2] = lower_32_bits(data);
+ cmd->data[3] = upper_32_bits(data);
+ CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
+}
+
+static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = devid;
+ CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
+}
+
+/*
+ * Builds an invalidation address which is suitable for one page or multiple
+ * pages. Sets the size bit (S) if more than one page is flushed.
+ */
+static inline u64 build_inv_address(u64 address, size_t size)
+{
+ u64 pages, end, msb_diff;
+
+ pages = iommu_num_pages(address, size, PAGE_SIZE);
+
+ if (pages == 1)
+ return address & PAGE_MASK;
+
+ end = address + size - 1;
+
+ /*
+ * msb_diff holds the index of the most significant bit that
+ * differs between the start and end addresses.
+ */
+ msb_diff = fls64(end ^ address) - 1;
+
+ /*
+ * Bits 63:52 are sign extended. If for some reason bit 51 is different
+ * between the start and the end, invalidate everything.
+ */
+ if (unlikely(msb_diff > 51)) {
+ address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+ } else {
+ /*
+ * The msb-bit must be clear on the address. Just set all the
+ * lower bits.
+ */
+ address |= (1ull << msb_diff) - 1;
+ }
+
+ /* Clear bits 11:0 */
+ address &= PAGE_MASK;
+
+ /* Set the size bit - we flush more than one 4KiB page */
+ return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+ size_t size, u16 domid, int pde)
+{
+ u64 inv_address = build_inv_address(address, size);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[1] |= domid;
+ cmd->data[2] = lower_32_bits(inv_address);
+ cmd->data[3] = upper_32_bits(inv_address);
+ CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+ if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+}
+
+static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
+ u64 address, size_t size)
+{
+ u64 inv_address = build_inv_address(address, size);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = devid;
+ cmd->data[0] |= (qdep & 0xff) << 24;
+ cmd->data[1] = devid;
+ cmd->data[2] = lower_32_bits(inv_address);
+ cmd->data[3] = upper_32_bits(inv_address);
+ CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
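+/*
+ * INVALIDATE_IOMMU_PAGES with the GN bit set: flush the PASID-tagged
+ * (guest) translations for @pasid in domain @domid, either a single page
+ * or, when @size is set, the range encoded in @address.
+ */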
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
+ u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = pasid;
+ cmd->data[1] = domid;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+}
+
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
+ int qdep, u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = devid;
+ cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
+ cmd->data[0] |= (qdep & 0xff) << 24;
+ cmd->data[1] = devid;
+ cmd->data[1] |= (pasid & 0xff) << 16;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ cmd->data[3] = upper_32_bits(address);
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
+ int status, int tag, bool gn)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->data[0] = devid;
+ if (gn) {
+ cmd->data[1] = pasid;
+ cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
+ cmd->data[3] = tag & 0x1ff;
+ cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
+
+ CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
+}
+
+static void build_inv_all(struct iommu_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ CMD_SET_TYPE(cmd, CMD_INV_ALL);
+}
+
+static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = devid;
+ CMD_SET_TYPE(cmd, CMD_INV_IRT);
+}
+
+/*
+ * Writes the command to the IOMMU's command buffer and informs the
+ * hardware about the new command.
+ */
+static int __iommu_queue_command_sync(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd,
+ bool sync)
+{
+ unsigned int count = 0;
+ u32 left, next_tail;
+
+ next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+again:
+ left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
+
+ if (left <= 0x20) {
+ /* Skip udelay() the first time around */
+ if (count++) {
+ if (count == LOOP_TIMEOUT) {
+ pr_err("Command buffer timeout\n");
+ return -EIO;
+ }
+
+ udelay(1);
+ }
+
+ /* Update head and recheck remaining space */
+ iommu->cmd_buf_head = readl(iommu->mmio_base +
+ MMIO_CMD_HEAD_OFFSET);
+
+ goto again;
+ }
+
+ copy_cmd_to_buffer(iommu, cmd);
+
+ /* Do we need to make sure all commands are processed? */
+ iommu->need_sync = sync;
+
+ return 0;
+}
+
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd,
+ bool sync)
+{
+ unsigned long flags;
+ int ret;
+
+ raw_spin_lock_irqsave(&iommu->lock, flags);
+ ret = __iommu_queue_command_sync(iommu, cmd, sync);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return ret;
+}
+
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+ return iommu_queue_command_sync(iommu, cmd, true);
+}
+
+/*
+ * This function queues a completion wait command into the command
+ * buffer of an IOMMU
+ */
+static int iommu_completion_wait(struct amd_iommu *iommu)
+{
+ struct iommu_cmd cmd;
+ unsigned long flags;
+ int ret;
+ u64 data;
+
+ if (!iommu->need_sync)
+ return 0;
+
+ data = atomic64_add_return(1, &iommu->cmd_sem_val);
+ build_completion_wait(&cmd, iommu, data);
+
+ raw_spin_lock_irqsave(&iommu->lock, flags);
+
+ ret = __iommu_queue_command_sync(iommu, &cmd, false);
+ if (ret)
+ goto out_unlock;
+
+ ret = wait_on_sem(iommu, data);
+
+out_unlock:
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return ret;
+}
+
+static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_dte(&cmd, devid);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
+static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
+{
+ u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
+
+ for (devid = 0; devid <= last_bdf; ++devid)
+ iommu_flush_dte(iommu, devid);
+
+ iommu_completion_wait(iommu);
+}
+
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
+static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
+{
+ u32 dom_id;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
+
+ for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
+ struct iommu_cmd cmd;
+ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ dom_id, 1);
+ iommu_queue_command(iommu, &cmd);
+ }
+
+ iommu_completion_wait(iommu);
+}
+
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ dom_id, 1);
+ iommu_queue_command(iommu, &cmd);
+
+ iommu_completion_wait(iommu);
+}
+
+static void amd_iommu_flush_all(struct amd_iommu *iommu)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_all(&cmd);
+
+ iommu_queue_command(iommu, &cmd);
+ iommu_completion_wait(iommu);
+}
+
+static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_irt(&cmd, devid);
+
+ iommu_queue_command(iommu, &cmd);
+}
+
+static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
+{
+ u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
+
+ if (iommu->irtcachedis_enabled)
+ return;
+
+ for (devid = 0; devid <= last_bdf; devid++)
+ iommu_flush_irt(iommu, devid);
+
+ iommu_completion_wait(iommu);
+}
+
+void iommu_flush_all_caches(struct amd_iommu *iommu)
+{
+ if (iommu_feature(iommu, FEATURE_IA)) {
+ amd_iommu_flush_all(iommu);
+ } else {
+ amd_iommu_flush_dte_all(iommu);
+ amd_iommu_flush_irt_all(iommu);
+ amd_iommu_flush_tlb_all(iommu);
+ }
+}
+
+/*
+ * Command send function for flushing on-device TLB
+ */
+static int device_flush_iotlb(struct iommu_dev_data *dev_data,
+ u64 address, size_t size)
+{
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+ int qdep;
+
+ qdep = dev_data->ats.qdep;
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
+
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
+static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct amd_iommu *iommu = data;
+
+ return iommu_flush_dte(iommu, alias);
+}
+
+/*
+ * Command send function for invalidating a device table entry
+ */
+static int device_flush_dte(struct iommu_dev_data *dev_data)
+{
+ struct amd_iommu *iommu;
+ struct pci_dev *pdev = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
+ u16 alias;
+ int ret;
+
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
+
+ if (dev_is_pci(dev_data->dev))
+ pdev = to_pci_dev(dev_data->dev);
+
+ if (pdev)
+ ret = pci_for_each_dma_alias(pdev,
+ device_flush_dte_alias, iommu);
+ else
+ ret = iommu_flush_dte(iommu, dev_data->devid);
+ if (ret)
+ return ret;
+
+ pci_seg = iommu->pci_seg;
+ alias = pci_seg->alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ ret = iommu_flush_dte(iommu, alias);
+ if (ret)
+ return ret;
+ }
+
+ if (dev_data->ats.enabled)
+ ret = device_flush_iotlb(dev_data, 0, ~0UL);
+
+ return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It invalidates a single PTE if the range to flush is within a single
+ * page. Otherwise it flushes the whole TLB of the IOMMU.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size, int pde)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_cmd cmd;
+ int ret = 0, i;
+
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+
+ for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+ if (!domain->dev_iommu[i])
+ continue;
+
+ /*
+ * Devices of this domain are behind this IOMMU;
+ * we need a TLB flush.
+ */
+ ret |= iommu_queue_command(amd_iommus[i], &cmd);
+ }
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+
+ if (!dev_data->ats.enabled)
+ continue;
+
+ ret |= device_flush_iotlb(dev_data, address, size);
+ }
+
+ WARN_ON(ret);
+}
+
+static void domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size, int pde)
+{
+ if (likely(!amd_iommu_np_cache)) {
+ __domain_flush_pages(domain, address, size, pde);
+ return;
+ }
+
+ /*
+ * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
+ * In such setups it is best to avoid flushes of ranges which are not
+ * naturally aligned, since they would lead to flushes of unmodified
+ * PTEs. Such flushes would require the hypervisor to do more work than
+ * necessary. Therefore, perform repeated flushes of aligned ranges
+ * until the whole range is covered. Each iteration flushes the smaller
+ * of the natural alignment of the address being flushed and the
+ * largest naturally aligned region that fits in the remaining range.
+ */
+ while (size != 0) {
+ int addr_alignment = __ffs(address);
+ int size_alignment = __fls(size);
+ int min_alignment;
+ size_t flush_size;
+
+ /*
+ * size is always non-zero, but address might be zero, causing
+ * addr_alignment to be negative. As the casting of the
+ * argument in __ffs(address) to long might trim the high bits
+ * of the address on x86-32, cast to long when doing the check.
+ */
+ if (likely((unsigned long)address != 0))
+ min_alignment = min(addr_alignment, size_alignment);
+ else
+ min_alignment = size_alignment;
+
+ flush_size = 1ul << min_alignment;
+
+ __domain_flush_pages(domain, address, flush_size, pde);
+ address += flush_size;
+ size -= flush_size;
+ }
+}
+
+/* Flush the whole IO/TLB for a given protection domain - including PDE */
+void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
+{
+ domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+}
+
+void amd_iommu_domain_flush_complete(struct protection_domain *domain)
+{
+ int i;
+
+ for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+ if (domain && !domain->dev_iommu[i])
+ continue;
+
+ /*
+ * Devices of this domain are behind this IOMMU;
+ * we need to wait for completion of all commands.
+ */
+ iommu_completion_wait(amd_iommus[i]);
+ }
+}
+
+/* Flush the not present cache if it exists */
+static void domain_flush_np_cache(struct protection_domain *domain,
+ dma_addr_t iova, size_t size)
+{
+ if (unlikely(amd_iommu_np_cache)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ domain_flush_pages(domain, iova, size, 1);
+ amd_iommu_domain_flush_complete(domain);
+ spin_unlock_irqrestore(&domain->lock, flags);
+ }
+}
+
+
+/*
+ * This function flushes the DTEs for all devices in domain
+ */
+static void domain_flush_devices(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list)
+ device_flush_dte(dev_data);
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the domain allocation. A domain is
+ * allocated for every IOMMU as the default domain. If device isolation
+ * is enabled, every device gets its own domain. The most important thing
+ * about domains is the page table mapping the DMA address space they
+ * contain.
+ *
+ ****************************************************************************/
+
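+/* Protection domain IDs come from a global bitmap; ID 0 is never handed out. */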
+static u16 domain_id_alloc(void)
+{
+ int id;
+
+ spin_lock(&pd_bitmap_lock);
+ id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
+ BUG_ON(id == 0);
+ if (id > 0 && id < MAX_DOMAIN_ID)
+ __set_bit(id, amd_iommu_pd_alloc_bitmap);
+ else
+ id = 0;
+ spin_unlock(&pd_bitmap_lock);
+
+ return id;
+}
+
+static void domain_id_free(int id)
+{
+ spin_lock(&pd_bitmap_lock);
+ if (id > 0 && id < MAX_DOMAIN_ID)
+ __clear_bit(id, amd_iommu_pd_alloc_bitmap);
+ spin_unlock(&pd_bitmap_lock);
+}
+
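+/*
+ * The GCR3 table is a 1- or 2-level tree of 512-entry pages holding the
+ * per-PASID guest CR3 pointers; tear it down bottom-up, freeing every
+ * valid sub-page.
+ */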
+static void free_gcr3_tbl_level1(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
+
+ free_page((unsigned long)ptr);
+ }
+}
+
+static void free_gcr3_tbl_level2(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
+
+ free_gcr3_tbl_level1(ptr);
+ }
+}
+
+static void free_gcr3_table(struct protection_domain *domain)
+{
+ if (domain->glx == 2)
+ free_gcr3_tbl_level2(domain->gcr3_tbl);
+ else if (domain->glx == 1)
+ free_gcr3_tbl_level1(domain->gcr3_tbl);
+ else
+ BUG_ON(domain->glx != 0);
+
+ free_page((unsigned long)domain->gcr3_tbl);
+}
+
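+/*
+ * Build the device table entry for @devid: host page-table root and mode,
+ * permission bits, optional ATS/PPR/GCR3 (IOMMUv2) fields and the domain
+ * ID. If a valid old domain ID is overwritten, its translation cache
+ * entries are flushed (relevant for kdump).
+ */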
+static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+ struct protection_domain *domain, bool ats, bool ppr)
+{
+ u64 pte_root = 0;
+ u64 flags = 0;
+ u32 old_domid;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ if (domain->iop.mode != PAGE_MODE_NONE)
+ pte_root = iommu_virt_to_phys(domain->iop.root);
+
+ pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
+ << DEV_ENTRY_MODE_SHIFT;
+
+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+
+ /*
+ * When SNP is enabled, only set the TV bit when IOMMU
+ * page translation is in use.
+ */
+ if (!amd_iommu_snp_en || (domain->id != 0))
+ pte_root |= DTE_FLAG_TV;
+
+ flags = dev_table[devid].data[1];
+
+ if (ats)
+ flags |= DTE_FLAG_IOTLB;
+
+ if (ppr) {
+ if (iommu_feature(iommu, FEATURE_EPHSUP))
+ pte_root |= 1ULL << DEV_ENTRY_PPR;
+ }
+
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
+ u64 glx = domain->glx;
+ u64 tmp;
+
+ pte_root |= DTE_FLAG_GV;
+ pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+ /* First mask out possible old values for GCR3 table */
+ tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+ flags &= ~tmp;
+
+ tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ flags &= ~tmp;
+
+ /* Encode GCR3 table into DTE */
+ tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+ pte_root |= tmp;
+
+ tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+ flags |= tmp;
+
+ tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+ flags |= tmp;
+
+ if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
+ dev_table[devid].data[2] |=
+ ((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
+ }
+
+ if (domain->flags & PD_GIOV_MASK)
+ pte_root |= DTE_FLAG_GIOV;
+ }
+
+ flags &= ~DEV_DOMID_MASK;
+ flags |= domain->id;
+
+ old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
+ dev_table[devid].data[1] = flags;
+ dev_table[devid].data[0] = pte_root;
+
+ /*
+ * A kdump kernel might be replacing a domain ID that was copied from
+ * the previous kernel. If so, it needs to flush the translation cache
+ * entries for the old domain ID that is being overwritten.
+ */
+ if (old_domid) {
+ amd_iommu_flush_tlb_domid(iommu, old_domid);
+ }
+}
+
+static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ /* remove entry from the device table seen by the hardware */
+ dev_table[devid].data[0] = DTE_FLAG_V;
+
+ if (!amd_iommu_snp_en)
+ dev_table[devid].data[0] |= DTE_FLAG_TV;
+
+ dev_table[devid].data[1] &= DTE_FLAG_MASK;
+
+ amd_iommu_apply_erratum_63(iommu, devid);
+}
+
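+/*
+ * Low-level attach: link the device into the domain's device list, bump the
+ * per-IOMMU and domain reference counts, write the DTE and flush it. Called
+ * from attach_device() with the domain and device-data locks held.
+ */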
+static void do_attach(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
+{
+ struct amd_iommu *iommu;
+ bool ats;
+
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
+ ats = dev_data->ats.enabled;
+
+ /* Update data structures */
+ dev_data->domain = domain;
+ list_add(&dev_data->list, &domain->dev_list);
+
+ /* Update NUMA Node ID */
+ if (domain->nid == NUMA_NO_NODE)
+ domain->nid = dev_to_node(dev_data->dev);
+
+ /* Do reference counting */
+ domain->dev_iommu[iommu->index] += 1;
+ domain->dev_cnt += 1;
+
+ /* Update device table */
+ set_dte_entry(iommu, dev_data->devid, domain,
+ ats, dev_data->iommu_v2);
+ clone_aliases(iommu, dev_data->dev);
+
+ device_flush_dte(dev_data);
+}
+
+static void do_detach(struct iommu_dev_data *dev_data)
+{
+ struct protection_domain *domain = dev_data->domain;
+ struct amd_iommu *iommu;
+
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
+
+ /* Update data structures */
+ dev_data->domain = NULL;
+ list_del(&dev_data->list);
+ clear_dte_entry(iommu, dev_data->devid);
+ clone_aliases(iommu, dev_data->dev);
+
+ /* Flush the DTE entry */
+ device_flush_dte(dev_data);
+
+ /* Flush IOTLB */
+ amd_iommu_domain_flush_tlb_pde(domain);
+
+ /* Wait for the flushes to finish */
+ amd_iommu_domain_flush_complete(domain);
+
+ /* decrease reference counters - needs to happen after the flushes */
+ domain->dev_iommu[iommu->index] -= 1;
+ domain->dev_cnt -= 1;
+}
+
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+ pci_disable_ats(pdev);
+ pci_disable_pri(pdev);
+ pci_disable_pasid(pdev);
+}
+
+static int pdev_pri_ats_enable(struct pci_dev *pdev)
+{
+ int ret;
+
+ /* Only allow access to user-accessible pages */
+ ret = pci_enable_pasid(pdev, 0);
+ if (ret)
+ return ret;
+
+ /* First reset the PRI state of the device */
+ ret = pci_reset_pri(pdev);
+ if (ret)
+ goto out_err_pasid;
+
+ /* Enable PRI */
+ /* FIXME: Hardcode number of outstanding requests for now */
+ ret = pci_enable_pri(pdev, 32);
+ if (ret)
+ goto out_err_pasid;
+
+ ret = pci_enable_ats(pdev, PAGE_SHIFT);
+ if (ret)
+ goto out_err_pri;
+
+ return 0;
+
+out_err_pri:
+ pci_disable_pri(pdev);
+
+out_err_pasid:
+ pci_disable_pasid(pdev);
+
+ return ret;
+}
+
+/*
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain.
+ */
+static int attach_device(struct device *dev,
+ struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ dev_data = dev_iommu_priv_get(dev);
+
+ spin_lock(&dev_data->lock);
+
+ ret = -EBUSY;
+ if (dev_data->domain != NULL)
+ goto out;
+
+ if (!dev_is_pci(dev))
+ goto skip_ats_check;
+
+ pdev = to_pci_dev(dev);
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
+
+ ret = -EINVAL;
+
+ /*
+ * When the AMD_IOMMU_V1 page table mode is used and the device
+ * is being enabled for PPR/ATS support (using the v2 table),
+ * we need to make sure that the domain type is identity map.
+ */
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ def_domain->type != IOMMU_DOMAIN_IDENTITY) {
+ goto out;
+ }
+
+ if (dev_data->iommu_v2) {
+ if (pdev_pri_ats_enable(pdev) != 0)
+ goto out;
+
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
+ }
+ } else if (amd_iommu_iotlb_sup &&
+ pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ }
+
+skip_ats_check:
+ ret = 0;
+
+ do_attach(dev_data, domain);
+
+ /*
+ * We might boot into a crash kernel here. The crashed kernel
+ * left the caches in the IOMMU dirty. So we have to flush
+ * here to evict all stale entries.
+ */
+ amd_iommu_domain_flush_tlb_pde(domain);
+
+ amd_iommu_domain_flush_complete(domain);
+
+out:
+ spin_unlock(&dev_data->lock);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+
+/*
+ * Removes a device from a protection domain (with devtable_lock held)
+ */
+static void detach_device(struct device *dev)
+{
+ struct protection_domain *domain;
+ struct iommu_dev_data *dev_data;
+ unsigned long flags;
+
+ dev_data = dev_iommu_priv_get(dev);
+ domain = dev_data->domain;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ spin_lock(&dev_data->lock);
+
+ /*
+ * First check if the device is still attached. It might already
+ * be detached from its domain because the generic
+ * iommu_detach_group code detached it and we try again here in
+ * our alias handling.
+ */
+ if (WARN_ON(!dev_data->domain))
+ goto out;
+
+ do_detach(dev_data);
+
+ if (!dev_is_pci(dev))
+ goto out;
+
+ if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
+ pdev_iommuv2_disable(to_pci_dev(dev));
+ else if (dev_data->ats.enabled)
+ pci_disable_ats(to_pci_dev(dev));
+
+ dev_data->ats.enabled = false;
+
+out:
+ spin_unlock(&dev_data->lock);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static struct iommu_device *amd_iommu_probe_device(struct device *dev)
+{
+ struct iommu_device *iommu_dev;
+ struct amd_iommu *iommu;
+ int ret;
+
+ if (!check_device(dev))
+ return ERR_PTR(-ENODEV);
+
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
+
+ /* Not registered yet? */
+ if (!iommu->iommu.ops)
+ return ERR_PTR(-ENODEV);
+
+ if (dev_iommu_priv_get(dev))
+ return &iommu->iommu;
+
+ ret = iommu_init_device(iommu, dev);
+ if (ret) {
+ if (ret != -ENOTSUPP)
+ dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+ iommu_dev = ERR_PTR(ret);
+ iommu_ignore_device(iommu, dev);
+ } else {
+ amd_iommu_set_pci_msi_domain(dev, iommu);
+ iommu_dev = &iommu->iommu;
+ }
+
+ iommu_completion_wait(iommu);
+
+ return iommu_dev;
+}
+
+static void amd_iommu_probe_finalize(struct device *dev)
+{
+ /* Domains are initialized for this device - have a look at what we ended up with */
+ set_dma_ops(dev, NULL);
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
+}
+
+static void amd_iommu_release_device(struct device *dev)
+{
+ struct amd_iommu *iommu;
+
+ if (!check_device(dev))
+ return;
+
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
+
+ amd_iommu_uninit_device(dev);
+ iommu_completion_wait(iommu);
+}
+
+static struct iommu_group *amd_iommu_device_group(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return pci_device_group(dev);
+
+ return acpihid_device_group(dev);
+}
+
+/*****************************************************************************
+ *
+ * The next functions belong to the dma_ops mapping/unmapping code.
+ *
+ *****************************************************************************/
+
+static void update_device_table(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ if (!iommu)
+ continue;
+ set_dte_entry(iommu, dev_data->devid, domain,
+ dev_data->ats.enabled, dev_data->iommu_v2);
+ clone_aliases(iommu, dev_data->dev);
+ }
+}
+
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+{
+ update_device_table(domain);
+ domain_flush_devices(domain);
+}
+
+void amd_iommu_domain_update(struct protection_domain *domain)
+{
+ /* Update device table */
+ amd_iommu_update_and_flush_device_table(domain);
+
+ /* Flush domain TLB(s) and wait for completion */
+ amd_iommu_domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_complete(domain);
+}
+
+/*****************************************************************************
+ *
+ * The following functions belong to the exported interface of AMD IOMMU
+ *
+ * This interface allows access to lower level functions of the IOMMU
+ * like protection domain handling and assignment of devices to domains,
+ * which is not possible with the dma_ops interface.
+ *
+ *****************************************************************************/
+
+static void cleanup_domain(struct protection_domain *domain)
+{
+ struct iommu_dev_data *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ while (!list_empty(&domain->dev_list)) {
+ entry = list_first_entry(&domain->dev_list,
+ struct iommu_dev_data, list);
+ BUG_ON(!entry->domain);
+ do_detach(entry);
+ }
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void protection_domain_free(struct protection_domain *domain)
+{
+ if (!domain)
+ return;
+
+ if (domain->iop.pgtbl_cfg.tlb)
+ free_io_pgtable_ops(&domain->iop.iop.ops);
+
+ if (domain->id)
+ domain_id_free(domain->id);
+
+ kfree(domain);
+}
+
+static int protection_domain_init_v1(struct protection_domain *domain, int mode)
+{
+ u64 *pt_root = NULL;
+
+ BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
+
+ spin_lock_init(&domain->lock);
+ domain->id = domain_id_alloc();
+ if (!domain->id)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&domain->dev_list);
+
+ if (mode != PAGE_MODE_NONE) {
+ pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pt_root) {
+ domain_id_free(domain->id);
+ return -ENOMEM;
+ }
+ }
+
+ amd_iommu_domain_set_pgtable(domain, pt_root, mode);
+
+ return 0;
+}
+
+static int protection_domain_init_v2(struct protection_domain *domain)
+{
+ spin_lock_init(&domain->lock);
+ domain->id = domain_id_alloc();
+ if (!domain->id)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&domain->dev_list);
+
+ domain->flags |= PD_GIOV_MASK;
+
+ domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+
+ if (domain_enable_v2(domain, 1)) {
+ domain_id_free(domain->id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct protection_domain *protection_domain_alloc(unsigned int type)
+{
+ struct io_pgtable_ops *pgtbl_ops;
+ struct protection_domain *domain;
+ int pgtable;
+ int mode = DEFAULT_PGTABLE_LEVEL;
+ int ret;
+
+ /*
+ * Force the IOMMU v1 page table when iommu=pt and
+ * when allocating a domain for pass-through devices.
+ */
+ if (type == IOMMU_DOMAIN_IDENTITY) {
+ pgtable = AMD_IOMMU_V1;
+ mode = PAGE_MODE_NONE;
+ } else if (type == IOMMU_DOMAIN_UNMANAGED) {
+ pgtable = AMD_IOMMU_V1;
+ } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
+ pgtable = amd_iommu_pgtable;
+ } else {
+ return NULL;
+ }
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ switch (pgtable) {
+ case AMD_IOMMU_V1:
+ ret = protection_domain_init_v1(domain, mode);
+ break;
+ case AMD_IOMMU_V2:
+ ret = protection_domain_init_v2(domain);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out_err;
+
+ /* No need to allocate io pgtable ops in passthrough mode */
+ if (type == IOMMU_DOMAIN_IDENTITY)
+ return domain;
+
+ domain->nid = NUMA_NO_NODE;
+
+ pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
+ if (!pgtbl_ops) {
+ domain_id_free(domain->id);
+ goto out_err;
+ }
+
+ return domain;
+out_err:
+ kfree(domain);
+ return NULL;
+}
+
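+/*
+ * Upper bound of the domain's IOVA aperture: unlimited for the v1 page
+ * table, limited by the guest page-table level (4 or 5 levels) for v2.
+ */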
+static inline u64 dma_max_address(void)
+{
+ if (amd_iommu_pgtable == AMD_IOMMU_V1)
+ return ~0ULL;
+
+ /* V2 with 4/5 level page table */
+ return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
+}
+
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+{
+ struct protection_domain *domain;
+
+ /*
+ * Since DTE[Mode]=0 is prohibited on SNP-enabled systems,
+ * default to using IOMMU_DOMAIN_DMA[_FQ].
+ */
+ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
+ return NULL;
+
+ domain = protection_domain_alloc(type);
+ if (!domain)
+ return NULL;
+
+ domain->domain.geometry.aperture_start = 0;
+ domain->domain.geometry.aperture_end = dma_max_address();
+ domain->domain.geometry.force_aperture = true;
+
+ return &domain->domain;
+}
+
+static void amd_iommu_domain_free(struct iommu_domain *dom)
+{
+ struct protection_domain *domain;
+
+ domain = to_pdomain(dom);
+
+ if (domain->dev_cnt > 0)
+ cleanup_domain(domain);
+
+ BUG_ON(domain->dev_cnt != 0);
+
+ if (!dom)
+ return;
+
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
+
+ protection_domain_free(domain);
+}
+
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+ struct device *dev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+ struct protection_domain *domain = to_pdomain(dom);
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ int ret;
+
+ /*
+ * Skip attaching the device to the domain if the new domain is
+ * the same as the device's current domain.
+ */
+ if (dev_data->domain == domain)
+ return 0;
+
+ dev_data->defer_attach = false;
+
+ if (dev_data->domain)
+ detach_device(dev);
+
+ ret = attach_device(dev, domain);
+
+#ifdef CONFIG_IRQ_REMAP
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+ if (dom->type == IOMMU_DOMAIN_UNMANAGED)
+ dev_data->use_vapic = 1;
+ else
+ dev_data->use_vapic = 0;
+ }
+#endif
+
+ iommu_completion_wait(iommu);
+
+ return ret;
+}
+
+static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+ unsigned long iova, size_t size)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+
+ if (ops->map_pages)
+ domain_flush_np_cache(domain, iova, size);
+}
+
+static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ int prot = 0;
+ int ret = -EINVAL;
+
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ (domain->iop.mode == PAGE_MODE_NONE))
+ return -EINVAL;
+
+ if (iommu_prot & IOMMU_READ)
+ prot |= IOMMU_PROT_IR;
+ if (iommu_prot & IOMMU_WRITE)
+ prot |= IOMMU_PROT_IW;
+
+ if (ops->map_pages) {
+ ret = ops->map_pages(ops, iova, paddr, pgsize,
+ pgcount, prot, gfp, mapped);
+ }
+
+ return ret;
+}
+
+static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ /*
+ * AMD's IOMMU can flush as many pages as necessary in a single flush.
+ * Unless we run in a virtual machine, which can be inferred from
+ * whether the "non-present cache" is on, it is probably best to prefer
+ * (potentially) too extensive TLB flushing (i.e., more misses) over
+ * multiple TLB flushes (i.e., more flushes). For virtual machines the
+ * hypervisor needs to synchronize the host IOMMU PTEs with those of
+ * the guest, and the trade-off is different: unnecessary TLB flushes
+ * should be avoided.
+ */
+ if (amd_iommu_np_cache &&
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
+
+ iommu_iotlb_gather_add_range(gather, iova, size);
+}
+
+static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ size_t r;
+
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ (domain->iop.mode == PAGE_MODE_NONE))
+ return 0;
+
+ r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
+
+ if (r)
+ amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
+
+ return r;
+}
+
+static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+ dma_addr_t iova)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+
+ return ops->iova_to_phys(ops, iova);
+}
+
+static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return true;
+ case IOMMU_CAP_NOEXEC:
+ return false;
+ case IOMMU_CAP_PRE_BOOT_PROTECTION:
+ return amdr_ivrs_remap_support;
+ case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
+ return true;
+ case IOMMU_CAP_DEFERRED_FLUSH:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void amd_iommu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ struct unity_map_entry *entry;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+ int devid, sbdf;
+
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return;
+
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
+ pci_seg = iommu->pci_seg;
+
+ list_for_each_entry(entry, &pci_seg->unity_map, list) {
+ int type, prot = 0;
+ size_t length;
+
+ if (devid < entry->devid_start || devid > entry->devid_end)
+ continue;
+
+ type = IOMMU_RESV_DIRECT;
+ length = entry->address_end - entry->address_start;
+ if (entry->prot & IOMMU_PROT_IR)
+ prot |= IOMMU_READ;
+ if (entry->prot & IOMMU_PROT_IW)
+ prot |= IOMMU_WRITE;
+ if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+ /* Exclusion range */
+ type = IOMMU_RESV_RESERVED;
+
+ region = iommu_alloc_resv_region(entry->address_start,
+ length, prot, type,
+ GFP_KERNEL);
+ if (!region) {
+ dev_err(dev, "Out of memory allocating dm-regions\n");
+ return;
+ }
+ list_add_tail(&region->list, head);
+ }
+
+ region = iommu_alloc_resv_region(MSI_RANGE_START,
+ MSI_RANGE_END - MSI_RANGE_START + 1,
+ 0, IOMMU_RESV_MSI, GFP_KERNEL);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
+
+ region = iommu_alloc_resv_region(HT_RANGE_START,
+ HT_RANGE_END - HT_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
+}
+
+bool amd_iommu_is_attach_deferred(struct device *dev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
+ return dev_data->defer_attach;
+}
+EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
+
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct protection_domain *dom = to_pdomain(domain);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dom->lock, flags);
+ amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_complete(dom);
+ spin_unlock_irqrestore(&dom->lock, flags);
+}
+
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ struct protection_domain *dom = to_pdomain(domain);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dom->lock, flags);
+ domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
+ amd_iommu_domain_flush_complete(dom);
+ spin_unlock_irqrestore(&dom->lock, flags);
+}
+
+static int amd_iommu_def_domain_type(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = dev_iommu_priv_get(dev);
+ if (!dev_data)
+ return 0;
+
+ /*
+ * Do not identity map IOMMUv2 capable devices when:
+ * - memory encryption is active, because some of those devices
+ * (AMD GPUs) don't have the encryption bit in their DMA-mask
+ * and require remapping.
+ * - SNP is enabled, because it prohibits DTE[Mode]=0.
+ */
+ if (dev_data->iommu_v2 &&
+ !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+ !amd_iommu_snp_en) {
+ return IOMMU_DOMAIN_IDENTITY;
+ }
+
+ return 0;
+}
+
+static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+{
+ /* IOMMU_PTE_FC is always set */
+ return true;
+}
+
+const struct iommu_ops amd_iommu_ops = {
+ .capable = amd_iommu_capable,
+ .domain_alloc = amd_iommu_domain_alloc,
+ .probe_device = amd_iommu_probe_device,
+ .release_device = amd_iommu_release_device,
+ .probe_finalize = amd_iommu_probe_finalize,
+ .device_group = amd_iommu_device_group,
+ .get_resv_regions = amd_iommu_get_resv_regions,
+ .is_attach_deferred = amd_iommu_is_attach_deferred,
+ .pgsize_bitmap = AMD_IOMMU_PGSIZES,
+ .def_domain_type = amd_iommu_def_domain_type,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = amd_iommu_attach_device,
+ .map_pages = amd_iommu_map_pages,
+ .unmap_pages = amd_iommu_unmap_pages,
+ .iotlb_sync_map = amd_iommu_iotlb_sync_map,
+ .iova_to_phys = amd_iommu_iova_to_phys,
+ .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+ .iotlb_sync = amd_iommu_iotlb_sync,
+ .free = amd_iommu_domain_free,
+ .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
+ }
+};
+
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of the IOMMU for
+ * passthrough mode.
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+/* IOMMUv2 specific functions */
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
+
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (domain->iop.pgtbl_cfg.tlb)
+ free_io_pgtable_ops(&domain->iop.iop.ops);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+/* Note: This function expects iommu_domain->lock to be held prior to calling the function. */
+static int domain_enable_v2(struct protection_domain *domain, int pasids)
+{
+ int levels;
+
+ /* Number of GCR3 table levels required */
+ for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+ levels += 1;
+
+ if (levels > amd_iommu_max_glx_val)
+ return -EINVAL;
+
+ domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ return -ENOMEM;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+
+ amd_iommu_domain_update(domain);
+
+ return 0;
+}
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+ struct protection_domain *pdom = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdom->lock, flags);
+
+ /*
+ * Spare us the sanity checks of whether devices already in the
+ * domain support IOMMUv2. Just require that the domain has no
+ * devices attached when it is switched into IOMMUv2 mode.
+ */
+ ret = -EBUSY;
+ if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
+ goto out;
+
+ if (!pdom->gcr3_tbl)
+ ret = domain_enable_v2(pdom, pasids);
+
+out:
+ spin_unlock_irqrestore(&pdom->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
+
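+/*
+ * Flush a PASID-tagged address (or, with @size set, the whole range) from
+ * the IOMMU TLBs of every IOMMU serving this domain and then from the
+ * device TLBs of all ATS-enabled devices attached to it.
+ */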
+static int __flush_pasid(struct protection_domain *domain, u32 pasid,
+ u64 address, bool size)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_cmd cmd;
+ int i, ret;
+
+ if (!(domain->flags & PD_IOMMUV2_MASK))
+ return -EINVAL;
+
+ build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+
+ /*
+ * The IOMMU TLB needs to be flushed before the device TLB to
+ * prevent the device TLB from being refilled from the IOMMU TLB.
+ */
+ for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+ if (domain->dev_iommu[i] == 0)
+ continue;
+
+ ret = iommu_queue_command(amd_iommus[i], &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until IOMMU TLB flushes are complete */
+ amd_iommu_domain_flush_complete(domain);
+
+ /* Now flush device TLBs */
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu;
+ int qdep;
+
+ /*
+ * There might be non-IOMMUv2 capable devices in an IOMMUv2
+ * domain.
+ */
+ if (!dev_data->ats.enabled)
+ continue;
+
+ qdep = dev_data->ats.qdep;
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ continue;
+ build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
+ qdep, address, size);
+
+ ret = iommu_queue_command(iommu, &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until all device TLBs are flushed */
+ amd_iommu_domain_flush_complete(domain);
+
+ ret = 0;
+
+out:
+
+ return ret;
+}
+
+static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
+ u64 address)
+{
+ return __flush_pasid(domain, pasid, address, false);
+}
+
+int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
+ u64 address)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_page(domain, pasid, address);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_page);
+
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
+{
+ return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ true);
+}
+
+int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_tlb(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_tlb);
+
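+/*
+ * Walk the GCR3 table down to the level-0 entry for @pasid, optionally
+ * allocating missing intermediate pages when @alloc is true.
+ */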
+static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = iommu_virt_to_phys(root) | GCR3_VALID;
+ }
+
+ root = iommu_phys_to_virt(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int __set_gcr3(struct protection_domain *domain, u32 pasid,
+ unsigned long cr3)
+{
+ u64 *pte;
+
+ if (domain->iop.mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
+
+ *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
+{
+ u64 *pte;
+
+ if (domain->iop.mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+ if (pte == NULL)
+ return 0;
+
+ *pte = 0;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
+ unsigned long cr3)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __set_gcr3(domain, pasid, cr3);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
+
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __clear_gcr3(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return -ENODEV;
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+EXPORT_SYMBOL(amd_iommu_complete_ppr);
+
+int amd_iommu_device_info(struct pci_dev *pdev,
+ struct amd_iommu_device_info *info)
+{
+ int max_pasids;
+ int pos;
+
+ if (pdev == NULL || info == NULL)
+ return -EINVAL;
+
+ if (!amd_iommu_v2_supported())
+ return -EINVAL;
+
+ memset(info, 0, sizeof(*info));
+
+ if (pci_ats_supported(pdev))
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (pos) {
+ int features;
+
+ max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
+ max_pasids = min(max_pasids, (1 << 20));
+
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+ info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
+
+ features = pci_pasid_features(pdev);
+ if (features & PCI_PASID_CAP_EXEC)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
+ if (features & PCI_PASID_CAP_PRIV)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_device_info);
+
+#ifdef CONFIG_IRQ_REMAP
+
+/*****************************************************************************
+ *
+ * Interrupt Remapping Implementation
+ *
+ *****************************************************************************/
+
+static struct irq_chip amd_ir_chip;
+static DEFINE_SPINLOCK(iommu_table_lock);
+
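+
+/*
+ * Invalidate the cached interrupt remapping table for this devid and
+ * wait for the IOMMU to finish: an INVALIDATE_INTERRUPT_TABLE command
+ * is queued together with a COMPLETION_WAIT under the command buffer
+ * lock, and the semaphore is polled before returning. When IRTE
+ * caching is disabled (irtcachedis) there is nothing to flush, so this
+ * is a no-op.
+ */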
+static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
+{
+ int ret;
+ u64 data;
+ unsigned long flags;
+ struct iommu_cmd cmd, cmd2;
+
+ if (iommu->irtcachedis_enabled)
+ return;
+
+ build_inv_irt(&cmd, devid);
+ data = atomic64_add_return(1, &iommu->cmd_sem_val);
+ build_completion_wait(&cmd2, iommu, data);
+
+ raw_spin_lock_irqsave(&iommu->lock, flags);
+ ret = __iommu_queue_command_sync(iommu, &cmd, true);
+ if (ret)
+ goto out;
+ ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+ if (ret)
+ goto out;
+ wait_on_sem(iommu, data);
+out:
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
+{
+ u64 dte;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ dte = dev_table[devid].data[2];
+ dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
+ dte |= iommu_virt_to_phys(table->table);
+ dte |= DTE_IRQ_REMAP_INTCTL;
+ dte |= DTE_INTTABLEN;
+ dte |= DTE_IRQ_REMAP_ENABLE;
+
+ dev_table[devid].data[2] = dte;
+}
+
+static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
+{
+ struct irq_remap_table *table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ if (WARN_ONCE(!pci_seg->rlookup_table[devid],
+ "%s: no iommu for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
+ return NULL;
+
+ table = pci_seg->irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
+ return NULL;
+
+ return table;
+}
+
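+
+/*
+ * Allocate an empty interrupt remapping table. The backing storage
+ * always comes from the same kmem cache, but the amount that has to be
+ * zeroed depends on the entry format: a 32-bit 'union irte' per entry
+ * in legacy mode, or a 128-bit 'struct irte_ga' per entry when guest
+ * virtual APIC mode is enabled.
+ */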
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+ struct irq_remap_table *table;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+ if (!table->table) {
+ kfree(table);
+ return NULL;
+ }
+ raw_spin_lock_init(&table->lock);
+
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ memset(table->table, 0,
+ MAX_IRQS_PER_TABLE * sizeof(u32));
+ else
+ memset(table->table, 0,
+ (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+ return table;
+}
+
+static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->irq_lookup_table[devid] = table;
+ set_dte_irq_entry(iommu, devid, table);
+ iommu_flush_dte(iommu, devid);
+}
+
+static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
+ void *data)
+{
+ struct irq_remap_table *table = data;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
+
+ if (!iommu)
+ return -EINVAL;
+
+ pci_seg = iommu->pci_seg;
+ pci_seg->irq_lookup_table[alias] = table;
+ set_dte_irq_entry(iommu, alias, table);
+ iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
+
+ return 0;
+}
+
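+
+/*
+ * Look up or create the remapping table for a device. Allocating the
+ * table can sleep, so it is done with iommu_table_lock dropped; after
+ * re-taking the lock both the devid and its DMA alias slots are checked
+ * again, and if another CPU installed a table in the meantime the newly
+ * allocated one is freed at the end.
+ */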
+static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
+ u16 devid, struct pci_dev *pdev)
+{
+ struct irq_remap_table *table = NULL;
+ struct irq_remap_table *new_table = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
+ unsigned long flags;
+ u16 alias;
+
+ spin_lock_irqsave(&iommu_table_lock, flags);
+
+ pci_seg = iommu->pci_seg;
+ table = pci_seg->irq_lookup_table[devid];
+ if (table)
+ goto out_unlock;
+
+ alias = pci_seg->alias_table[devid];
+ table = pci_seg->irq_lookup_table[alias];
+ if (table) {
+ set_remap_table_entry(iommu, devid, table);
+ goto out_wait;
+ }
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
+
+ /* Nothing there yet, allocate new irq remapping table */
+ new_table = __alloc_irq_table();
+ if (!new_table)
+ return NULL;
+
+ spin_lock_irqsave(&iommu_table_lock, flags);
+
+ table = pci_seg->irq_lookup_table[devid];
+ if (table)
+ goto out_unlock;
+
+ table = pci_seg->irq_lookup_table[alias];
+ if (table) {
+ set_remap_table_entry(iommu, devid, table);
+ goto out_wait;
+ }
+
+ table = new_table;
+ new_table = NULL;
+
+ if (pdev)
+ pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
+ table);
+ else
+ set_remap_table_entry(iommu, devid, table);
+
+ if (devid != alias)
+ set_remap_table_entry(iommu, alias, table);
+
+out_wait:
+ iommu_completion_wait(iommu);
+
+out_unlock:
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
+
+ if (new_table) {
+ kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+ kfree(new_table);
+ }
+ return table;
+}
+
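+
+/*
+ * Reserve 'count' consecutive entries in the device's remapping table.
+ * For multi-MSI the block must start on a power-of-two boundary, so the
+ * scan rounds 'count' up to the next power of two and, whenever it hits
+ * an already allocated entry, restarts at the next aligned index. On
+ * success the index of the first reserved entry is returned, otherwise
+ * -ENOSPC (or -ENODEV if no table could be allocated).
+ */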
+static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
+ bool align, struct pci_dev *pdev)
+{
+ struct irq_remap_table *table;
+ int index, c, alignment = 1;
+ unsigned long flags;
+
+ table = alloc_irq_table(iommu, devid, pdev);
+ if (!table)
+ return -ENODEV;
+
+ if (align)
+ alignment = roundup_pow_of_two(count);
+
+ raw_spin_lock_irqsave(&table->lock, flags);
+
+ /* Scan table for free entries */
+ for (index = ALIGN(table->min_index, alignment), c = 0;
+ index < MAX_IRQS_PER_TABLE;) {
+ if (!iommu->irte_ops->is_allocated(table, index)) {
+ c += 1;
+ } else {
+ c = 0;
+ index = ALIGN(index + 1, alignment);
+ continue;
+ }
+
+ if (c == count) {
+ for (; c != 0; --c)
+ iommu->irte_ops->set_allocated(table, index - c + 1);
+
+ index -= count - 1;
+ goto out;
+ }
+
+ index++;
+ }
+
+ index = -ENOSPC;
+
+out:
+ raw_spin_unlock_irqrestore(&table->lock, flags);
+
+ return index;
+}
+
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte)
+{
+ struct irq_remap_table *table;
+ struct irte_ga *entry;
+ unsigned long flags;
+ u128 old;
+
+ table = get_irq_table(iommu, devid);
+ if (!table)
+ return -ENOMEM;
+
+ raw_spin_lock_irqsave(&table->lock, flags);
+
+ entry = (struct irte_ga *)table->table;
+ entry = &entry[index];
+
+ /*
+ * We use cmpxchg16 to atomically update the 128-bit IRTE,
+ * and it cannot be updated by the hardware or other processors
+ * behind us, so the return value of cmpxchg16 should be the
+ * same as the old value.
+ */
+ old = entry->irte;
+ WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
+
+ raw_spin_unlock_irqrestore(&table->lock, flags);
+
+ iommu_flush_irt_and_complete(iommu, devid);
+
+ return 0;
+}
+
+static int modify_irte(struct amd_iommu *iommu,
+ u16 devid, int index, union irte *irte)
+{
+ struct irq_remap_table *table;
+ unsigned long flags;
+
+ table = get_irq_table(iommu, devid);
+ if (!table)
+ return -ENOMEM;
+
+ raw_spin_lock_irqsave(&table->lock, flags);
+ table->table[index] = irte->val;
+ raw_spin_unlock_irqrestore(&table->lock, flags);
+
+ iommu_flush_irt_and_complete(iommu, devid);
+
+ return 0;
+}
+
+static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
+{
+ struct irq_remap_table *table;
+ unsigned long flags;
+
+ table = get_irq_table(iommu, devid);
+ if (!table)
+ return;
+
+ raw_spin_lock_irqsave(&table->lock, flags);
+ iommu->irte_ops->clear_allocated(table, index);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
+
+ iommu_flush_irt_and_complete(iommu, devid);
+}
+
+static void irte_prepare(void *entry,
+ u32 delivery_mode, bool dest_mode,
+ u8 vector, u32 dest_apicid, int devid)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->val = 0;
+ irte->fields.vector = vector;
+ irte->fields.int_type = delivery_mode;
+ irte->fields.destination = dest_apicid;
+ irte->fields.dm = dest_mode;
+ irte->fields.valid = 1;
+}
+
+static void irte_ga_prepare(void *entry,
+ u32 delivery_mode, bool dest_mode,
+ u8 vector, u32 dest_apicid, int devid)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+
+ irte->lo.val = 0;
+ irte->hi.val = 0;
+ irte->lo.fields_remap.int_type = delivery_mode;
+ irte->lo.fields_remap.dm = dest_mode;
+ irte->hi.fields.vector = vector;
+ irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
+ irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
+ irte->lo.fields_remap.valid = 1;
+}
+
+static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->fields.valid = 1;
+ modify_irte(iommu, devid, index, irte);
+}
+
+static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+
+ irte->lo.fields_remap.valid = 1;
+ modify_irte_ga(iommu, devid, index, irte);
+}
+
+static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->fields.valid = 0;
+ modify_irte(iommu, devid, index, irte);
+}
+
+static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+
+ irte->lo.fields_remap.valid = 0;
+ modify_irte_ga(iommu, devid, index, irte);
+}
+
+static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
+ u8 vector, u32 dest_apicid)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->fields.vector = vector;
+ irte->fields.destination = dest_apicid;
+ modify_irte(iommu, devid, index, irte);
+}
+
+static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
+ u8 vector, u32 dest_apicid)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+
+ if (!irte->lo.fields_remap.guest_mode) {
+ irte->hi.fields.vector = vector;
+ irte->lo.fields_remap.destination =
+ APICID_TO_IRTE_DEST_LO(dest_apicid);
+ irte->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(dest_apicid);
+ modify_irte_ga(iommu, devid, index, irte);
+ }
+}
+
+#define IRTE_ALLOCATED (~1U)
+static void irte_set_allocated(struct irq_remap_table *table, int index)
+{
+ table->table[index] = IRTE_ALLOCATED;
+}
+
+static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
+{
+ struct irte_ga *ptr = (struct irte_ga *)table->table;
+ struct irte_ga *irte = &ptr[index];
+
+ memset(&irte->lo.val, 0, sizeof(u64));
+ memset(&irte->hi.val, 0, sizeof(u64));
+ irte->hi.fields.vector = 0xff;
+}
+
+static bool irte_is_allocated(struct irq_remap_table *table, int index)
+{
+ union irte *ptr = (union irte *)table->table;
+ union irte *irte = &ptr[index];
+
+ return irte->val != 0;
+}
+
+static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
+{
+ struct irte_ga *ptr = (struct irte_ga *)table->table;
+ struct irte_ga *irte = &ptr[index];
+
+ return irte->hi.fields.vector != 0;
+}
+
+static void irte_clear_allocated(struct irq_remap_table *table, int index)
+{
+ table->table[index] = 0;
+}
+
+static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
+{
+ struct irte_ga *ptr = (struct irte_ga *)table->table;
+ struct irte_ga *irte = &ptr[index];
+
+ memset(&irte->lo.val, 0, sizeof(u64));
+ memset(&irte->hi.val, 0, sizeof(u64));
+}
+
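+
+/*
+ * Translate an irq_alloc_info into the IOMMU-visible ID of the device
+ * that will generate the interrupt. The returned value packs the PCI
+ * segment in the upper 16 bits and the 16-bit device ID in the lower
+ * bits; callers split it with PCI_SBDF_TO_SEGID()/PCI_SBDF_TO_DEVID().
+ */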
+static int get_devid(struct irq_alloc_info *info)
+{
+ switch (info->type) {
+ case X86_IRQ_ALLOC_TYPE_IOAPIC:
+ return get_ioapic_devid(info->devid);
+ case X86_IRQ_ALLOC_TYPE_HPET:
+ return get_hpet_devid(info->devid);
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
+ return get_device_sbdf_id(msi_desc_to_dev(info->desc));
+ default:
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+}
+
+struct irq_remap_ops amd_iommu_irq_ops = {
+ .prepare = amd_iommu_prepare,
+ .enable = amd_iommu_enable,
+ .disable = amd_iommu_disable,
+ .reenable = amd_iommu_reenable,
+ .enable_faulting = amd_iommu_enable_faulting,
+};
+
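+
+/*
+ * With interrupt remapping enabled the MSI message no longer encodes
+ * vector and destination directly: the data word carries the index into
+ * the device's interrupt remapping table and the address is the
+ * architectural MSI base, so the IOMMU resolves the real target from
+ * the IRTE.
+ */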
+static void fill_msi_msg(struct msi_msg *msg, u32 index)
+{
+ msg->data = index;
+ msg->address_lo = 0;
+ msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
+ msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
+}
+
+static void irq_remapping_prepare_irte(struct amd_ir_data *data,
+ struct irq_cfg *irq_cfg,
+ struct irq_alloc_info *info,
+ int devid, int index, int sub_handle)
+{
+ struct irq_2_irte *irte_info = &data->irq_2_irte;
+ struct amd_iommu *iommu = data->iommu;
+
+ if (!iommu)
+ return;
+
+ data->irq_2_irte.devid = devid;
+ data->irq_2_irte.index = index + sub_handle;
+ iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
+ apic->dest_mode_logical, irq_cfg->vector,
+ irq_cfg->dest_apicid, devid);
+
+ switch (info->type) {
+ case X86_IRQ_ALLOC_TYPE_IOAPIC:
+ case X86_IRQ_ALLOC_TYPE_HPET:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
+ fill_msi_msg(&data->msi_entry, irte_info->index);
+ break;
+
+ default:
+ BUG_ON(1);
+ break;
+ }
+}
+
+struct amd_irte_ops irte_32_ops = {
+ .prepare = irte_prepare,
+ .activate = irte_activate,
+ .deactivate = irte_deactivate,
+ .set_affinity = irte_set_affinity,
+ .set_allocated = irte_set_allocated,
+ .is_allocated = irte_is_allocated,
+ .clear_allocated = irte_clear_allocated,
+};
+
+struct amd_irte_ops irte_128_ops = {
+ .prepare = irte_ga_prepare,
+ .activate = irte_ga_activate,
+ .deactivate = irte_ga_deactivate,
+ .set_affinity = irte_ga_set_affinity,
+ .set_allocated = irte_ga_set_allocated,
+ .is_allocated = irte_ga_is_allocated,
+ .clear_allocated = irte_ga_clear_allocated,
+};
+
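+
+/*
+ * Allocate remapped interrupts: look up the IOMMU that serves the
+ * device, let the parent domain allocate the Linux irqs, reserve IRTE
+ * indexes in the device's remapping table (IOAPIC pins map 1:1 onto the
+ * first 32 entries), and attach per-irq amd_ir_data that carries the
+ * prepared IRTE and MSI message.
+ */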
+static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct irq_data *irq_data;
+ struct amd_ir_data *data = NULL;
+ struct amd_iommu *iommu;
+ struct irq_cfg *cfg;
+ int i, ret, devid, seg, sbdf;
+ int index;
+
+ if (!info)
+ return -EINVAL;
+ if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
+ return -EINVAL;
+
+ sbdf = get_devid(info);
+ if (sbdf < 0)
+ return -EINVAL;
+
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = __rlookup_amd_iommu(seg, devid);
+ if (!iommu)
+ return -EINVAL;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
+ struct irq_remap_table *table;
+
+ table = alloc_irq_table(iommu, devid, NULL);
+ if (table) {
+ if (!table->min_index) {
+ /*
+ * Keep the first 32 indexes free for IOAPIC
+ * interrupts.
+ */
+ table->min_index = 32;
+ for (i = 0; i < 32; ++i)
+ iommu->irte_ops->set_allocated(table, i);
+ }
+ WARN_ON(table->min_index != 32);
+ index = info->ioapic.pin;
+ } else {
+ index = -ENOMEM;
+ }
+ } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
+ info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
+ bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
+
+ index = alloc_irq_index(iommu, devid, nr_irqs, align,
+ msi_desc_to_pci_dev(info->desc));
+ } else {
+ index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
+ }
+
+ if (index < 0) {
+ pr_warn("Failed to allocate IRTE\n");
+ ret = index;
+ goto out_free_parent;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ cfg = irq_data ? irqd_cfg(irq_data) : NULL;
+ if (!cfg) {
+ ret = -EINVAL;
+ goto out_free_data;
+ }
+
+ ret = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_free_data;
+
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
+ else
+ data->entry = kzalloc(sizeof(struct irte_ga),
+ GFP_KERNEL);
+ if (!data->entry) {
+ kfree(data);
+ goto out_free_data;
+ }
+
+ data->iommu = iommu;
+ irq_data->hwirq = (devid << 16) + i;
+ irq_data->chip_data = data;
+ irq_data->chip = &amd_ir_chip;
+ irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
+ irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
+ }
+
+ return 0;
+
+out_free_data:
+ for (i--; i >= 0; i--) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ if (irq_data)
+ kfree(irq_data->chip_data);
+ }
+ for (i = 0; i < nr_irqs; i++)
+ free_irte(iommu, devid, index + i);
+out_free_parent:
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ return ret;
+}
+
+static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_2_irte *irte_info;
+ struct irq_data *irq_data;
+ struct amd_ir_data *data;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ if (irq_data && irq_data->chip_data) {
+ data = irq_data->chip_data;
+ irte_info = &data->irq_2_irte;
+ free_irte(data->iommu, irte_info->devid, irte_info->index);
+ kfree(data->entry);
+ kfree(data);
+ }
+ }
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+ struct amd_ir_data *ir_data,
+ struct irq_2_irte *irte_info,
+ struct irq_cfg *cfg);
+
+static int irq_remapping_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool reserve)
+{
+ struct amd_ir_data *data = irq_data->chip_data;
+ struct irq_2_irte *irte_info = &data->irq_2_irte;
+ struct amd_iommu *iommu = data->iommu;
+ struct irq_cfg *cfg = irqd_cfg(irq_data);
+
+ if (!iommu)
+ return 0;
+
+ iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
+ irte_info->index);
+ amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
+ return 0;
+}
+
+static void irq_remapping_deactivate(struct irq_domain *domain,
+ struct irq_data *irq_data)
+{
+ struct amd_ir_data *data = irq_data->chip_data;
+ struct irq_2_irte *irte_info = &data->irq_2_irte;
+ struct amd_iommu *iommu = data->iommu;
+
+ if (iommu)
+ iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
+ irte_info->index);
+}
+
+static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct amd_iommu *iommu;
+ int devid = -1;
+
+ if (!amd_iommu_irq_remap)
+ return 0;
+
+ if (x86_fwspec_is_ioapic(fwspec))
+ devid = get_ioapic_devid(fwspec->param[0]);
+ else if (x86_fwspec_is_hpet(fwspec))
+ devid = get_hpet_devid(fwspec->param[0]);
+
+ if (devid < 0)
+ return 0;
+ iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
+
+ return iommu && iommu->ir_domain == d;
+}
+
+static const struct irq_domain_ops amd_ir_domain_ops = {
+ .select = irq_remapping_select,
+ .alloc = irq_remapping_alloc,
+ .free = irq_remapping_free,
+ .activate = irq_remapping_activate,
+ .deactivate = irq_remapping_deactivate,
+};
+
+int amd_iommu_activate_guest_mode(void *data)
+{
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ u64 valid;
+
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
+ return 0;
+
+ valid = entry->lo.fields_vapic.valid;
+
+ entry->lo.val = 0;
+ entry->hi.val = 0;
+
+ entry->lo.fields_vapic.valid = valid;
+ entry->lo.fields_vapic.guest_mode = 1;
+ entry->lo.fields_vapic.ga_log_intr = 1;
+ entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
+ entry->hi.fields.vector = ir_data->ga_vector;
+ entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
+
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
+}
+EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
+
+int amd_iommu_deactivate_guest_mode(void *data)
+{
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ struct irq_cfg *cfg = ir_data->cfg;
+ u64 valid;
+
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+ !entry || !entry->lo.fields_vapic.guest_mode)
+ return 0;
+
+ valid = entry->lo.fields_remap.valid;
+
+ entry->lo.val = 0;
+ entry->hi.val = 0;
+
+ entry->lo.fields_remap.valid = valid;
+ entry->lo.fields_remap.dm = apic->dest_mode_logical;
+ entry->lo.fields_remap.int_type = apic->delivery_mode;
+ entry->hi.fields.vector = cfg->vector;
+ entry->lo.fields_remap.destination =
+ APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+ entry->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
+
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
+}
+EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
+
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+{
+ int ret;
+ struct amd_iommu_pi_data *pi_data = vcpu_info;
+ struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+ struct amd_ir_data *ir_data = data->chip_data;
+ struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+ struct iommu_dev_data *dev_data;
+
+ if (ir_data->iommu == NULL)
+ return -EINVAL;
+
+ dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
+
+ /* Note:
+ * This device has never been set up for guest mode.
+ * We should not modify the IRTE.
+ */
+ if (!dev_data || !dev_data->use_vapic)
+ return 0;
+
+ ir_data->cfg = irqd_cfg(data);
+ pi_data->ir_data = ir_data;
+
+ /* Note:
+ * SVM tries to set up for VAPIC mode, but the IOMMU is running in
+ * legacy interrupt remapping mode, so force legacy mode instead.
+ */
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+ pr_debug("%s: Fall back to using intr legacy remap\n",
+ __func__);
+ pi_data->is_guest_mode = false;
+ }
+
+ pi_data->prev_ga_tag = ir_data->cached_ga_tag;
+ if (pi_data->is_guest_mode) {
+ ir_data->ga_root_ptr = (pi_data->base >> 12);
+ ir_data->ga_vector = vcpu_pi_info->vector;
+ ir_data->ga_tag = pi_data->ga_tag;
+ ret = amd_iommu_activate_guest_mode(ir_data);
+ if (!ret)
+ ir_data->cached_ga_tag = pi_data->ga_tag;
+ } else {
+ ret = amd_iommu_deactivate_guest_mode(ir_data);
+
+ /*
+ * This communicates the ga_tag back to the caller
+ * so that it can do all the necessary clean up.
+ */
+ if (!ret)
+ ir_data->cached_ga_tag = 0;
+ }
+
+ return ret;
+}
+
+
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+ struct amd_ir_data *ir_data,
+ struct irq_2_irte *irte_info,
+ struct irq_cfg *cfg)
+{
+
+ /*
+ * Atomically updates the IRTE with the new destination, vector
+ * and flushes the interrupt entry cache.
+ */
+ iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
+ irte_info->index, cfg->vector,
+ cfg->dest_apicid);
+}
+
+static int amd_ir_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct amd_ir_data *ir_data = data->chip_data;
+ struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct irq_data *parent = data->parent_data;
+ struct amd_iommu *iommu = ir_data->iommu;
+ int ret;
+
+ if (!iommu)
+ return -ENODEV;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+ return ret;
+
+ amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
+ /*
+ * After this point, all the interrupts will start arriving
+ * at the new destination. So, time to cleanup the previous
+ * vector allocation.
+ */
+ vector_schedule_cleanup(cfg);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ struct amd_ir_data *ir_data = irq_data->chip_data;
+
+ *msg = ir_data->msi_entry;
+}
+
+static struct irq_chip amd_ir_chip = {
+ .name = "AMD-IR",
+ .irq_ack = apic_ack_irq,
+ .irq_set_affinity = amd_ir_set_affinity,
+ .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
+ .irq_compose_msi_msg = ir_compose_msi_msg,
+};
+
+static const struct msi_parent_ops amdvi_msi_parent_ops = {
+ .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
+ MSI_FLAG_MULTI_PCI_MSI |
+ MSI_FLAG_PCI_IMS,
+ .prefix = "IR-",
+ .init_dev_msi_info = msi_parent_init_dev_msi_info,
+};
+
+static const struct msi_parent_ops virt_amdvi_msi_parent_ops = {
+ .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
+ MSI_FLAG_MULTI_PCI_MSI,
+ .prefix = "vIR-",
+ .init_dev_msi_info = msi_parent_init_dev_msi_info,
+};
+
+int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
+ if (!fn)
+ return -ENOMEM;
+ iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0,
+ fn, &amd_ir_domain_ops, iommu);
+ if (!iommu->ir_domain) {
+ irq_domain_free_fwnode(fn);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
+ iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI;
+
+ if (amd_iommu_np_cache)
+ iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops;
+ else
+ iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
+
+ return 0;
+}
+
+int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+ !entry || !entry->lo.fields_vapic.guest_mode)
+ return 0;
+
+ if (!ir_data->iommu)
+ return -ENODEV;
+
+ if (cpu >= 0) {
+ entry->lo.fields_vapic.destination =
+ APICID_TO_IRTE_DEST_LO(cpu);
+ entry->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cpu);
+ }
+ entry->lo.fields_vapic.is_run = is_run;
+
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
+#endif
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
new file mode 100644
index 0000000000..57c2fb1146
--- /dev/null
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -0,0 +1,996 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+
+#include <linux/refcount.h>
+#include <linux/mmu_notifier.h>
+#include <linux/amd-iommu.h>
+#include <linux/mm_types.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/cc_platform.h>
+
+#include "amd_iommu.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
+
+#define PRI_QUEUE_SIZE 512
+
+struct pri_queue {
+ atomic_t inflight;
+ bool finish;
+ int status;
+};
+
+struct pasid_state {
+ struct list_head list; /* For global state-list */
+ refcount_t count; /* Reference count */
+ unsigned mmu_notifier_count; /* Counting nested mmu_notifier
+ calls */
+ struct mm_struct *mm; /* mm_struct for the faults */
+ struct mmu_notifier mn; /* mmu_notifier handle */
+ struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
+ struct device_state *device_state; /* Link to our device_state */
+ u32 pasid; /* PASID index */
+ bool invalid; /* Used during setup and
+ teardown of the pasid */
+ spinlock_t lock; /* Protect pri_queues and
+ mmu_notifier_count */
+ wait_queue_head_t wq; /* To wait for count == 0 */
+};
+
+struct device_state {
+ struct list_head list;
+ u32 sbdf;
+ atomic_t count;
+ struct pci_dev *pdev;
+ struct pasid_state **states;
+ struct iommu_domain *domain;
+ int pasid_levels;
+ int max_pasids;
+ amd_iommu_invalid_ppr_cb inv_ppr_cb;
+ amd_iommu_invalidate_ctx inv_ctx_cb;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+};
+
+struct fault {
+ struct work_struct work;
+ struct device_state *dev_state;
+ struct pasid_state *state;
+ struct mm_struct *mm;
+ u64 address;
+ u32 pasid;
+ u16 tag;
+ u16 finish;
+ u16 flags;
+};
+
+static LIST_HEAD(state_list);
+static DEFINE_SPINLOCK(state_lock);
+
+static struct workqueue_struct *iommu_wq;
+
+static void free_pasid_states(struct device_state *dev_state);
+
+static struct device_state *__get_device_state(u32 sbdf)
+{
+ struct device_state *dev_state;
+
+ list_for_each_entry(dev_state, &state_list, list) {
+ if (dev_state->sbdf == sbdf)
+ return dev_state;
+ }
+
+ return NULL;
+}
+
+static struct device_state *get_device_state(u32 sbdf)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ dev_state = __get_device_state(sbdf);
+ if (dev_state != NULL)
+ atomic_inc(&dev_state->count);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return dev_state;
+}
+
+static void free_device_state(struct device_state *dev_state)
+{
+ struct iommu_group *group;
+
+ /* Get rid of any remaining pasid states */
+ free_pasid_states(dev_state);
+
+ /*
+ * Wait until the last reference is dropped before freeing
+ * the device state.
+ */
+ wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+
+ /*
+ * First detach device from domain - No more PRI requests will arrive
+ * from that device after it is unbound from the IOMMUv2 domain.
+ */
+ group = iommu_group_get(&dev_state->pdev->dev);
+ if (WARN_ON(!group))
+ return;
+
+ iommu_detach_group(dev_state->domain, group);
+
+ iommu_group_put(group);
+
+ /* Everything is down now, free the IOMMUv2 domain */
+ iommu_domain_free(dev_state->domain);
+
+ /* Finally get rid of the device-state */
+ kfree(dev_state);
+}
+
+static void put_device_state(struct device_state *dev_state)
+{
+ if (atomic_dec_and_test(&dev_state->count))
+ wake_up(&dev_state->wq);
+}
+
+/* Must be called under dev_state->lock */
+static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
+ u32 pasid, bool alloc)
+{
+ struct pasid_state **root, **ptr;
+ int level, index;
+
+ level = dev_state->pasid_levels;
+ root = dev_state->states;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ ptr = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (*ptr == NULL) {
+ if (!alloc)
+ return NULL;
+
+ *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (*ptr == NULL)
+ return NULL;
+ }
+
+ root = (struct pasid_state **)*ptr;
+ level -= 1;
+ }
+
+ return ptr;
+}
+
+static int set_pasid_state(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ u32 pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ ret = -ENOMEM;
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = -ENOMEM;
+ if (*ptr != NULL)
+ goto out_unlock;
+
+ *ptr = pasid_state;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ *ptr = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+}
+
+static struct pasid_state *get_pasid_state(struct device_state *dev_state,
+ u32 pasid)
+{
+ struct pasid_state **ptr, *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, false);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = *ptr;
+ if (ret)
+ refcount_inc(&ret->count);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void free_pasid_state(struct pasid_state *pasid_state)
+{
+ kfree(pasid_state);
+}
+
+static void put_pasid_state(struct pasid_state *pasid_state)
+{
+ if (refcount_dec_and_test(&pasid_state->count))
+ wake_up(&pasid_state->wq);
+}
+
+static void put_pasid_state_wait(struct pasid_state *pasid_state)
+{
+ if (!refcount_dec_and_test(&pasid_state->count))
+ wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
+ free_pasid_state(pasid_state);
+}
+
+static void unbind_pasid(struct pasid_state *pasid_state)
+{
+ struct iommu_domain *domain;
+
+ domain = pasid_state->device_state->domain;
+
+ /*
+ * Mark pasid_state as invalid; no more faults will be added to the
+ * work queue after this is visible everywhere.
+ */
+ pasid_state->invalid = true;
+
+ /* Make sure this is visible */
+ smp_wmb();
+
+ /* After this the device/pasid can't access the mm anymore */
+ amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
+
+ /* Make sure no more pending faults are in the queue */
+ flush_workqueue(iommu_wq);
+}
+
+static void free_pasid_states_level1(struct pasid_state **tbl)
+{
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ free_page((unsigned long)tbl[i]);
+ }
+}
+
+static void free_pasid_states_level2(struct pasid_state **tbl)
+{
+ struct pasid_state **ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ ptr = (struct pasid_state **)tbl[i];
+ free_pasid_states_level1(ptr);
+ }
+}
+
+static void free_pasid_states(struct device_state *dev_state)
+{
+ struct pasid_state *pasid_state;
+ int i;
+
+ for (i = 0; i < dev_state->max_pasids; ++i) {
+ pasid_state = get_pasid_state(dev_state, i);
+ if (pasid_state == NULL)
+ continue;
+
+ put_pasid_state(pasid_state);
+
+ /* Clear the pasid state so that the pasid can be re-used */
+ clear_pasid_state(dev_state, pasid_state->pasid);
+
+ /*
+ * This will call the mn_release function and
+ * unbind the PASID
+ */
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+ put_pasid_state_wait(pasid_state); /* Reference taken in
+ amd_iommu_bind_pasid */
+
+ /* Drop reference taken in amd_iommu_bind_pasid */
+ put_device_state(dev_state);
+ }
+
+ if (dev_state->pasid_levels == 2)
+ free_pasid_states_level2(dev_state->states);
+ else if (dev_state->pasid_levels == 1)
+ free_pasid_states_level1(dev_state->states);
+ else
+ BUG_ON(dev_state->pasid_levels != 0);
+
+ free_page((unsigned long)dev_state->states);
+}
+
+static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
+{
+ return container_of(mn, struct pasid_state, mn);
+}
+
+static void mn_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
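+ /*
+ * start and (end - 1) are in the same page iff they differ only in
+ * the low PAGE_SHIFT bits, i.e. their XOR is below PAGE_SIZE. In
+ * that case a single-page flush is sufficient, otherwise flush the
+ * whole TLB for this PASID.
+ */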
+ if ((start ^ (end - 1)) < PAGE_SIZE)
+ amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
+ start);
+ else
+ amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
+}
+
+static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ bool run_inv_ctx_cb;
+
+ might_sleep();
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+ run_inv_ctx_cb = !pasid_state->invalid;
+
+ if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
+ dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
+
+ unbind_pasid(pasid_state);
+}
+
+static const struct mmu_notifier_ops iommu_mn = {
+ .release = mn_release,
+ .arch_invalidate_secondary_tlbs = mn_arch_invalidate_secondary_tlbs,
+};
+
+static void set_pri_tag_status(struct pasid_state *pasid_state,
+ u16 tag, int status)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ pasid_state->pri[tag].status = status;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void finish_pri_tag(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ u16 tag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
+ pasid_state->pri[tag].finish) {
+ amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
+ pasid_state->pri[tag].status, tag);
+ pasid_state->pri[tag].finish = false;
+ pasid_state->pri[tag].status = PPR_SUCCESS;
+ }
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void handle_fault_error(struct fault *fault)
+{
+ int status;
+
+ if (!fault->dev_state->inv_ppr_cb) {
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ return;
+ }
+
+ status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+ fault->pasid,
+ fault->address,
+ fault->flags);
+ switch (status) {
+ case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+ set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_INVALID:
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_FAIL:
+ set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+ unsigned long requested = 0;
+
+ if (fault->flags & PPR_FAULT_EXEC)
+ requested |= VM_EXEC;
+
+ if (fault->flags & PPR_FAULT_READ)
+ requested |= VM_READ;
+
+ if (fault->flags & PPR_FAULT_WRITE)
+ requested |= VM_WRITE;
+
+ return (requested & ~vma->vm_flags) != 0;
+}
+
+static void do_fault(struct work_struct *work)
+{
+ struct fault *fault = container_of(work, struct fault, work);
+ struct vm_area_struct *vma;
+ vm_fault_t ret = VM_FAULT_ERROR;
+ unsigned int flags = 0;
+ struct mm_struct *mm;
+ u64 address;
+
+ mm = fault->state->mm;
+ address = fault->address;
+
+ if (fault->flags & PPR_FAULT_USER)
+ flags |= FAULT_FLAG_USER;
+ if (fault->flags & PPR_FAULT_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+ flags |= FAULT_FLAG_REMOTE;
+
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, address);
+ if (!vma)
+ /* failed to get a vma in the right range */
+ goto out;
+
+ /* Check if we have the right permissions on the vma */
+ if (access_error(vma, fault))
+ goto out;
+
+ ret = handle_mm_fault(vma, address, flags, NULL);
+out:
+ mmap_read_unlock(mm);
+
+ if (ret & VM_FAULT_ERROR)
+ /* failed to service fault */
+ handle_fault_error(fault);
+
+ finish_pri_tag(fault->dev_state, fault->state, fault->tag);
+
+ put_pasid_state(fault->state);
+
+ kfree(fault);
+}
+
+static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
+{
+ struct amd_iommu_fault *iommu_fault;
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ struct pci_dev *pdev = NULL;
+ unsigned long flags;
+ struct fault *fault;
+ bool finish;
+ u16 tag, devid, seg_id;
+ int ret;
+
+ iommu_fault = data;
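+
+ /*
+ * The low nine bits of the PPR tag index the per-PASID pri[] array
+ * (PRI_QUEUE_SIZE entries); bit 9 is the 'finish' flag, which tells
+ * finish_pri_tag() to send the PPR completion once all in-flight
+ * faults for this tag have been handled.
+ */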
+ tag = iommu_fault->tag & 0x1ff;
+ finish = (iommu_fault->tag >> 9) & 1;
+
+ seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
+ devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
+ pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (!pdev)
+ return -ENODEV;
+
+ ret = NOTIFY_DONE;
+
+ /* In kdump kernel pci dev is not initialized yet -> send INVALID */
+ if (amd_iommu_is_attach_deferred(&pdev->dev)) {
+ amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
+ PPR_INVALID, tag);
+ goto out;
+ }
+
+ dev_state = get_device_state(iommu_fault->sbdf);
+ if (dev_state == NULL)
+ goto out;
+
+ pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
+ if (pasid_state == NULL || pasid_state->invalid) {
+ /* We know the device but not the PASID -> send INVALID */
+ amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
+ PPR_INVALID, tag);
+ goto out_drop_state;
+ }
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ atomic_inc(&pasid_state->pri[tag].inflight);
+ if (finish)
+ pasid_state->pri[tag].finish = true;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+
+ fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
+ if (fault == NULL) {
+ /* We are OOM - send success and let the device re-fault */
+ finish_pri_tag(dev_state, pasid_state, tag);
+ goto out_drop_state;
+ }
+
+ fault->dev_state = dev_state;
+ fault->address = iommu_fault->address;
+ fault->state = pasid_state;
+ fault->tag = tag;
+ fault->finish = finish;
+ fault->pasid = iommu_fault->pasid;
+ fault->flags = iommu_fault->flags;
+ INIT_WORK(&fault->work, do_fault);
+
+ queue_work(iommu_wq, &fault->work);
+
+ ret = NOTIFY_OK;
+
+out_drop_state:
+
+ if (ret != NOTIFY_OK && pasid_state)
+ put_pasid_state(pasid_state);
+
+ put_device_state(dev_state);
+
+out:
+ pci_dev_put(pdev);
+ return ret;
+}
+
+static struct notifier_block ppr_nb = {
+ .notifier_call = ppr_notifier,
+};
+
+int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
+ struct task_struct *task)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ struct mm_struct *mm;
+ u32 sbdf;
+ int ret;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
+
+ if (dev_state == NULL)
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (pasid >= dev_state->max_pasids)
+ goto out;
+
+ ret = -ENOMEM;
+ pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
+ if (pasid_state == NULL)
+ goto out;
+
+ refcount_set(&pasid_state->count, 1);
+ init_waitqueue_head(&pasid_state->wq);
+ spin_lock_init(&pasid_state->lock);
+
+ mm = get_task_mm(task);
+ pasid_state->mm = mm;
+ pasid_state->device_state = dev_state;
+ pasid_state->pasid = pasid;
+ pasid_state->invalid = true; /* Marked valid only once the
+ pasid is fully set up */
+ pasid_state->mn.ops = &iommu_mn;
+
+ if (pasid_state->mm == NULL)
+ goto out_free;
+
+ ret = mmu_notifier_register(&pasid_state->mn, mm);
+ if (ret)
+ goto out_free;
+
+ ret = set_pasid_state(dev_state, pasid_state, pasid);
+ if (ret)
+ goto out_unregister;
+
+ ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
+ __pa(pasid_state->mm->pgd));
+ if (ret)
+ goto out_clear_state;
+
+ /* Now we are ready to handle faults */
+ pasid_state->invalid = false;
+
+ /*
+ * Drop the reference to the mm_struct here. We rely on the
+ * mmu_notifier release call-back to inform us when the mm
+ * is going away.
+ */
+ mmput(mm);
+
+ return 0;
+
+out_clear_state:
+ clear_pasid_state(dev_state, pasid);
+
+out_unregister:
+ mmu_notifier_unregister(&pasid_state->mn, mm);
+ mmput(mm);
+
+out_free:
+ free_pasid_state(pasid_state);
+
+out:
+ put_device_state(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_bind_pasid);
+
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ u32 sbdf;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
+ if (dev_state == NULL)
+ return;
+
+ if (pasid >= dev_state->max_pasids)
+ goto out;
+
+ pasid_state = get_pasid_state(dev_state, pasid);
+ if (pasid_state == NULL)
+ goto out;
+ /*
+ * Drop the reference taken by get_pasid_state() above. We are safe
+ * because we still hold the reference taken in amd_iommu_bind_pasid().
+ */
+ put_pasid_state(pasid_state);
+
+ /* Clear the pasid state so that the pasid can be re-used */
+ clear_pasid_state(dev_state, pasid_state->pasid);
+
+ /*
+ * Call mmu_notifier_unregister to drop our reference
+ * to pasid_state->mm
+ */
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+ put_pasid_state_wait(pasid_state); /* Reference taken in
+ amd_iommu_bind_pasid */
+out:
+ /* Drop reference taken in this function */
+ put_device_state(dev_state);
+
+ /* Drop reference taken in amd_iommu_bind_pasid */
+ put_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_unbind_pasid);
+
+int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
+{
+ struct device_state *dev_state;
+ struct iommu_group *group;
+ unsigned long flags;
+ int ret, tmp;
+ u32 sbdf;
+
+ might_sleep();
+
+ /*
+ * When memory encryption is active the device is likely not in a
+ * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
+ */
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+ return -ENODEV;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ if (pasids <= 0 || pasids > (PASID_MASK + 1))
+ return -EINVAL;
+
+ sbdf = get_pci_sbdf_id(pdev);
+
+ dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
+ if (dev_state == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&dev_state->lock);
+ init_waitqueue_head(&dev_state->wq);
+ dev_state->pdev = pdev;
+ dev_state->sbdf = sbdf;
+
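+ /*
+ * Number of 9-bit table levels needed to cover 'pasids' entries: up
+ * to 512 PASIDs fit in a single level (pasid_levels == 0), while e.g.
+ * 65536 PASIDs need two levels (pasid_levels == 1).
+ */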
+ tmp = pasids;
+ for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
+ dev_state->pasid_levels += 1;
+
+ atomic_set(&dev_state->count, 1);
+ dev_state->max_pasids = pasids;
+
+ ret = -ENOMEM;
+ dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
+ if (dev_state->states == NULL)
+ goto out_free_dev_state;
+
+ dev_state->domain = iommu_domain_alloc(&pci_bus_type);
+ if (dev_state->domain == NULL)
+ goto out_free_states;
+
+ /* See iommu_is_default_domain() */
+ dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
+ amd_iommu_domain_direct_map(dev_state->domain);
+
+ ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
+ if (ret)
+ goto out_free_domain;
+
+ group = iommu_group_get(&pdev->dev);
+ if (!group) {
+ ret = -EINVAL;
+ goto out_free_domain;
+ }
+
+ ret = iommu_attach_group(dev_state->domain, group);
+ if (ret != 0)
+ goto out_drop_group;
+
+ iommu_group_put(group);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ if (__get_device_state(sbdf) != NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ ret = -EBUSY;
+ goto out_free_domain;
+ }
+
+ list_add_tail(&dev_state->list, &state_list);
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return 0;
+
+out_drop_group:
+ iommu_group_put(group);
+
+out_free_domain:
+ iommu_domain_free(dev_state->domain);
+
+out_free_states:
+ free_page((unsigned long)dev_state->states);
+
+out_free_dev_state:
+ kfree(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_init_device);
+
+void amd_iommu_free_device(struct pci_dev *pdev)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u32 sbdf;
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ sbdf = get_pci_sbdf_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ dev_state = __get_device_state(sbdf);
+ if (dev_state == NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ return;
+ }
+
+ list_del(&dev_state->list);
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ put_device_state(dev_state);
+ free_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_free_device);
+
+int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+ amd_iommu_invalid_ppr_cb cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u32 sbdf;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ sbdf = get_pci_sbdf_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = __get_device_state(sbdf);
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ppr_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
+
+int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+ amd_iommu_invalidate_ctx cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u32 sbdf;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ sbdf = get_pci_sbdf_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = __get_device_state(sbdf);
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ctx_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
+
+static int __init amd_iommu_v2_init(void)
+{
+ int ret;
+
+ if (!amd_iommu_v2_supported()) {
+ pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
+ /*
+ * Load anyway to provide the symbols to other modules
+ * which may use AMD IOMMUv2 optionally.
+ */
+ return 0;
+ }
+
+ ret = -ENOMEM;
+ iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
+ if (iommu_wq == NULL)
+ goto out;
+
+ amd_iommu_register_ppr_notifier(&ppr_nb);
+
+ pr_info("AMD IOMMUv2 loaded and initialized\n");
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void __exit amd_iommu_v2_exit(void)
+{
+ struct device_state *dev_state, *next;
+ unsigned long flags;
+ LIST_HEAD(freelist);
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ amd_iommu_unregister_ppr_notifier(&ppr_nb);
+
+ flush_workqueue(iommu_wq);
+
+ /*
+ * The loop below might call flush_workqueue(), so call
+ * destroy_workqueue() after it
+ */
+ spin_lock_irqsave(&state_lock, flags);
+
+ list_for_each_entry_safe(dev_state, next, &state_list, list) {
+ WARN_ON_ONCE(1);
+
+ put_device_state(dev_state);
+ list_del(&dev_state->list);
+ list_add_tail(&dev_state->list, &freelist);
+ }
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ /*
+ * Since free_device_state waits on the count to be zero,
+ * we need to free dev_state outside the spinlock.
+ */
+ list_for_each_entry_safe(dev_state, next, &freelist, list) {
+ list_del(&dev_state->list);
+ free_device_state(dev_state);
+ }
+
+ destroy_workqueue(iommu_wq);
+}
+
+module_init(amd_iommu_v2_init);
+module_exit(amd_iommu_v2_exit);
diff --git a/drivers/iommu/amd/quirks.c b/drivers/iommu/amd/quirks.c
new file mode 100644
index 0000000000..79dbb8f33b
--- /dev/null
+++ b/drivers/iommu/amd/quirks.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Quirks for AMD IOMMU
+ *
+ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
+ */
+
+#ifdef CONFIG_DMI
+#include <linux/dmi.h>
+
+#include "amd_iommu.h"
+
+#define IVHD_SPECIAL_IOAPIC 1
+
+struct ivrs_quirk_entry {
+ u8 id;
+ u32 devid;
+};
+
+enum {
+ DELL_INSPIRON_7375 = 0,
+ DELL_LATITUDE_5495,
+ LENOVO_IDEAPAD_330S_15ARR,
+};
+
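+
+/*
+ * The devid below is the PCI bus/device/function packed as
+ * (bus << 8) | (dev << 3) | fn, so 00:14.0 becomes (0x14 << 3) = 0xa0
+ * and 00:00.2 becomes 0x2.
+ */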
+static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
+ /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
+ [DELL_INSPIRON_7375] = {
+ { .id = 4, .devid = 0xa0 },
+ { .id = 5, .devid = 0x2 },
+ {}
+ },
+ /* ivrs_ioapic[4]=00:14.0 */
+ [DELL_LATITUDE_5495] = {
+ { .id = 4, .devid = 0xa0 },
+ {}
+ },
+ /* ivrs_ioapic[32]=00:14.0 */
+ [LENOVO_IDEAPAD_330S_15ARR] = {
+ { .id = 32, .devid = 0xa0 },
+ {}
+ },
+ {}
+};
+
+static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
+{
+ const struct ivrs_quirk_entry *i;
+
+ for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
+ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u32 *)&i->devid, 0);
+
+ return 0;
+}
+
+static const struct dmi_system_id ivrs_quirks[] __initconst = {
+ {
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Dell Inspiron 7375",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
+ },
+ {
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Dell Latitude 5495",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+ },
+ {
+ /*
+ * Acer Aspire A315-41 requires the very same workaround as
+ * Dell Latitude 5495
+ */
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Acer Aspire A315-41",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+ },
+ {
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Lenovo ideapad 330S-15ARR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
+ },
+ {}
+};
+
+void __init amd_iommu_apply_ivrs_quirks(void)
+{
+ dmi_check_system(ivrs_quirks);
+}
+#endif