Message ID | 20220817012024.3251276-8-baolu.lu@linux.intel.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | iommu: SVA and IOPF refactoring | expand |
On Wed, Aug 17, 2022 at 09:20:18AM +0800, Lu Baolu wrote: > +static int intel_svm_set_dev_pasid(struct iommu_domain *domain, > + struct device *dev, ioasid_t pasid) > +{ > + struct device_domain_info *info = dev_iommu_priv_get(dev); > + struct intel_iommu *iommu = info->iommu; > + struct iommu_sva *sva; > + int ret = 0; > + > + mutex_lock(&pasid_mutex); > + /* > + * Detach the domain if a blocking domain is set. Check the > + * right domain type once the IOMMU driver supports a real > + * blocking domain. > + */ > + if (!domain || domain->type == IOMMU_DOMAIN_UNMANAGED) { > + intel_svm_unbind_mm(dev, pasid); See, I think this is exactly the wrong way to use the ops The blocking domain ops should have its own function that just unconditionally calls intel_svm_unbind_mm() > + } else { > + struct mm_struct *mm = domain->mm; > + > + sva = intel_svm_bind_mm(iommu, dev, mm); > + if (IS_ERR(sva)) > + ret = PTR_ERR(sva); And similarly the SVA domain should have its own op that does this SVM call. Muxing the ops with tests on the domain is an anti-pattern. In fact I would say any time you see an op testing the domain->type it is very suspicious. Jason
On 2022/8/18 21:36, Jason Gunthorpe wrote: > On Wed, Aug 17, 2022 at 09:20:18AM +0800, Lu Baolu wrote: > >> +static int intel_svm_set_dev_pasid(struct iommu_domain *domain, >> + struct device *dev, ioasid_t pasid) >> +{ >> + struct device_domain_info *info = dev_iommu_priv_get(dev); >> + struct intel_iommu *iommu = info->iommu; >> + struct iommu_sva *sva; >> + int ret = 0; >> + >> + mutex_lock(&pasid_mutex); >> + /* >> + * Detach the domain if a blocking domain is set. Check the >> + * right domain type once the IOMMU driver supports a real >> + * blocking domain. >> + */ >> + if (!domain || domain->type == IOMMU_DOMAIN_UNMANAGED) { >> + intel_svm_unbind_mm(dev, pasid); > > See, I think this is exactly the wrong way to use the ops > > The blocking domain ops should have its own function that just > unconditionally calls intel_svm_unbind_mm() > >> + } else { >> + struct mm_struct *mm = domain->mm; >> + >> + sva = intel_svm_bind_mm(iommu, dev, mm); >> + if (IS_ERR(sva)) >> + ret = PTR_ERR(sva); > > And similarly the SVA domain should have its own op that does this SVM call. > > Muxing the ops with tests on the domain is an anti-pattern. In fact I > would say any time you see an op testing the domain->type it is very suspicious. Both agreed. Will fix them in the next version. Best regards, baolu
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index a9b8367c9361..4875c9974abd 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -747,6 +747,7 @@ void intel_svm_unbind(struct iommu_sva *handle); u32 intel_svm_get_pasid(struct iommu_sva *handle); int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); +struct iommu_domain *intel_svm_domain_alloc(void); struct intel_svm_dev { struct list_head list; @@ -772,6 +773,10 @@ struct intel_svm { }; #else static inline void intel_svm_check(struct intel_iommu *iommu) {} +static inline struct iommu_domain *intel_svm_domain_alloc(void) +{ + return NULL; +} #endif #ifdef CONFIG_INTEL_IOMMU_DEBUGFS diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 7cca030a508e..27c9fd6139a8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4149,6 +4149,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) return domain; case IOMMU_DOMAIN_IDENTITY: return &si_domain->domain; + case IOMMU_DOMAIN_SVA: + return intel_svm_domain_alloc(); default: return NULL; } diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 2420fa5c2360..16a4d413fce4 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -928,3 +928,53 @@ int intel_svm_page_response(struct device *dev, mutex_unlock(&pasid_mutex); return ret; } + +static int intel_svm_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + struct iommu_sva *sva; + int ret = 0; + + mutex_lock(&pasid_mutex); + /* + * Detach the domain if a blocking domain is set. Check the + * right domain type once the IOMMU driver supports a real + * blocking domain. 
+ */ + if (!domain || domain->type == IOMMU_DOMAIN_UNMANAGED) { + intel_svm_unbind_mm(dev, pasid); + } else { + struct mm_struct *mm = domain->mm; + + sva = intel_svm_bind_mm(iommu, dev, mm); + if (IS_ERR(sva)) + ret = PTR_ERR(sva); + } + mutex_unlock(&pasid_mutex); + + return ret; +} + +static void intel_svm_domain_free(struct iommu_domain *domain) +{ + kfree(to_dmar_domain(domain)); +} + +static const struct iommu_domain_ops intel_svm_domain_ops = { + .set_dev_pasid = intel_svm_set_dev_pasid, + .free = intel_svm_domain_free, +}; + +struct iommu_domain *intel_svm_domain_alloc(void) +{ + struct dmar_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return NULL; + domain->domain.ops = &intel_svm_domain_ops; + + return &domain->domain; +}