From e54def4ad8144ab15f826416e2e0f290ef1901b4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 19 Jun 2024 23:00:30 +0200
Subject: Adding upstream version 6.9.2.

Signed-off-by: Daniel Baumann
---
 drivers/iommu/intel/svm.c | 63 ++++++++++----------------------------------------
 1 file changed, 13 insertions(+), 50 deletions(-)

(limited to 'drivers/iommu/intel/svm.c')

diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4d269df008..ee3b469e2d 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -22,7 +22,6 @@
 #include "iommu.h"
 #include "pasid.h"
 #include "perf.h"
-#include "../iommu-sva.h"
 #include "trace.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
@@ -315,10 +314,11 @@ out:
 	return 0;
 }
 
-static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
-			     struct iommu_domain *domain, ioasid_t pasid)
+static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
+				   struct device *dev, ioasid_t pasid)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
 	struct mm_struct *mm = domain->mm;
 	struct intel_svm_dev *sdev;
 	struct intel_svm *svm;
@@ -360,7 +360,6 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
 	sdev->iommu = iommu;
 	sdev->did = FLPT_DEFAULT_DID;
 	sdev->sid = PCI_DEVID(info->bus, info->devfn);
-	init_rcu_head(&sdev->rcu);
 	if (info->ats_enabled) {
 		sdev->qdep = info->ats_qdep;
 		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
@@ -408,13 +407,6 @@ void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
 		if (svm->notifier.ops)
 			mmu_notifier_unregister(&svm->notifier, mm);
 		pasid_private_remove(svm->pasid);
-		/*
-		 * We mandate that no page faults may be outstanding
-		 * for the PASID when intel_svm_unbind_mm() is called.
-		 * If that is not obeyed, subtle errors will happen.
-		 * Let's make them less subtle...
-		 */
-		memset(svm, 0x6b, sizeof(*svm));
 		kfree(svm);
 	}
 }
@@ -562,16 +554,12 @@ static int prq_to_iommu_prot(struct page_req_dsc *req)
 	return prot;
 }
 
-static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
-				struct page_req_dsc *desc)
+static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
+				 struct page_req_dsc *desc)
 {
-	struct iommu_fault_event event;
-
-	if (!dev || !dev_is_pci(dev))
-		return -ENODEV;
+	struct iopf_fault event = { };
 
 	/* Fill in event data for device specific processing */
-	memset(&event, 0, sizeof(struct iommu_fault_event));
 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
 	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
 	event.fault.prm.pasid = desc->pasid;
@@ -603,7 +591,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
 		event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
 	}
 
-	return iommu_report_device_fault(dev, &event);
+	iommu_report_device_fault(dev, &event);
 }
 
 static void handle_bad_prq_event(struct intel_iommu *iommu,
@@ -707,12 +695,10 @@ bad_req:
 			goto bad_req;
 		}
 
-		if (intel_svm_prq_report(iommu, dev, req))
-			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
-		else
-			trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
-					 req->priv_data[0], req->priv_data[1],
-					 iommu->prq_seq_number++);
+		intel_svm_prq_report(iommu, dev, req);
+		trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
+				 req->priv_data[0], req->priv_data[1],
+				 iommu->prq_seq_number++);
 		mutex_unlock(&iommu->iopf_lock);
 prq_advance:
 		head = (head + sizeof(*req)) & PRQ_RING_MASK;
@@ -743,9 +729,8 @@ prq_advance:
 	return IRQ_RETVAL(handled);
 }
 
-int intel_svm_page_response(struct device *dev,
-			    struct iommu_fault_event *evt,
-			    struct iommu_page_response *msg)
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+			     struct iommu_page_response *msg)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu = info->iommu;
@@ -754,7 +739,6 @@ int intel_svm_page_response(struct device *dev,
 	bool private_present;
 	bool pasid_present;
 	bool last_page;
-	int ret = 0;
 	u16 sid;
 
 	prm = &evt->fault.prm;
@@ -763,16 +747,6 @@ int intel_svm_page_response(struct device *dev,
 	private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
 	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
 
-	if (!pasid_present) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	/*
 	 * Per VT-d spec. v3.0 ch7.7, system software must respond
 	 * with page group response if private data is present (PDP)
@@ -801,17 +775,6 @@ int intel_svm_page_response(struct device *dev,
 
 		qi_submit_sync(iommu, &desc, 1, 0);
 	}
-out:
-	return ret;
-}
-
-static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
-				   struct device *dev, ioasid_t pasid)
-{
-	struct device_domain_info *info = dev_iommu_priv_get(dev);
-	struct intel_iommu *iommu = info->iommu;
-
-	return intel_svm_bind_mm(iommu, dev, domain, pasid);
 }
 
 static void intel_svm_domain_free(struct iommu_domain *domain)
-- 
cgit v1.2.3