Message ID | 20200817171558.325917-12-jean-philippe@linaro.org
---|---
State | New, archived
Series | iommu: Shared Virtual Addressing for SMMUv3 (PT sharing part)
Hi Jean,

On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> Implement the IOMMU device feature callbacks to support the SVA feature.
> At the moment dev_has_feat() returns false since I/O Page Faults isn't
> yet implemented.
and because we don't advertise BTM, isn't it?
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> ---
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 26 ++++++
>  .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 49 ++++++++++++
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 79 +++++++++++++++++++
>  3 files changed, 154 insertions(+)
>
> [...]

Besides

Reviewed-by: Eric Auger <eric.auger@redhat.com>

Eric
On Tue, Sep 08, 2020 at 11:46:05AM +0200, Auger Eric wrote:
> Hi Jean,
>
> On 8/17/20 7:15 PM, Jean-Philippe Brucker wrote:
> > Implement the IOMMU device feature callbacks to support the SVA feature.
> > At the moment dev_has_feat() returns false since I/O Page Faults isn't
> > yet implemented.
> and because we don't advertise BTM, isn't it?

Right, adding it to the commit log

> Besides
>
> Reviewed-by: Eric Auger <eric.auger@redhat.com>

Thanks for the reviews!

Jean
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 7b14b48a26c7..ba34914813ff 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -646,6 +646,8 @@ struct arm_smmu_master {
 	u32			*sids;
 	unsigned int		num_sids;
 	bool			ats_enabled;
+	bool			sva_enabled;
+	struct list_head	bonds;
 	unsigned int		ssid_bits;
 };
 
@@ -687,10 +689,34 @@ bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
 
 #ifdef CONFIG_ARM_SMMU_V3_SVA
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
 #else /* CONFIG_ARM_SMMU_V3_SVA */
 static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 {
 	return false;
 }
+
+static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+	return false;
+}
+
+static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+	return false;
+}
+
+static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+	return -ENODEV;
+}
+
+static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_ARM_SMMU_V3_SVA */
 #endif /* _ARM_SMMU_V3_H */
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index bf81d91ce71e..28027620cf2e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,6 +10,8 @@
 #include "arm-smmu-v3.h"
 #include "../../io-pgtable-arm.h"
 
+static DEFINE_MUTEX(sva_lock);
+
 /*
  * Try to reserve this ASID in the SMMU. If it is in use, try to steal it from
  * the private entry. Careful here, we may be modifying the context tables of
@@ -196,3 +198,50 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 
 	return true;
 }
+
+static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
+{
+	return false;
+}
+
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+		return false;
+
+	/* SSID and IOPF support are mandatory for the moment */
+	return master->ssid_bits && arm_smmu_iopf_supported(master);
+}
+
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+	bool enabled;
+
+	mutex_lock(&sva_lock);
+	enabled = master->sva_enabled;
+	mutex_unlock(&sva_lock);
+	return enabled;
+}
+
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+	mutex_lock(&sva_lock);
+	master->sva_enabled = true;
+	mutex_unlock(&sva_lock);
+
+	return 0;
+}
+
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+	mutex_lock(&sva_lock);
+	if (!list_empty(&master->bonds)) {
+		dev_err(master->dev, "cannot disable SVA, device is bound\n");
+		mutex_unlock(&sva_lock);
+		return -EBUSY;
+	}
+	master->sva_enabled = false;
+	mutex_unlock(&sva_lock);
+
+	return 0;
+}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 15cb3d9c1a5d..5ed5bb42298f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2163,6 +2163,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	master = dev_iommu_priv_get(dev);
 	smmu = master->smmu;
 
+	/*
+	 * Checking that SVA is disabled ensures that this device isn't bound to
+	 * any mm, and can be safely detached from its old domain. Bonds cannot
+	 * be removed concurrently since we're holding the group mutex.
+	 */
+	if (arm_smmu_master_sva_enabled(master)) {
+		dev_err(dev, "cannot attach - SVA enabled\n");
+		return -EBUSY;
+	}
+
 	arm_smmu_detach_dev(master);
 
 	mutex_lock(&smmu_domain->init_mutex);
@@ -2310,6 +2320,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 	master->smmu = smmu;
 	master->sids = fwspec->ids;
 	master->num_sids = fwspec->num_ids;
+	INIT_LIST_HEAD(&master->bonds);
 	dev_iommu_priv_set(dev, master);
 
 	/* Check the SIDs are in range of the SMMU and our stream table */
@@ -2362,6 +2373,7 @@ static void arm_smmu_release_device(struct device *dev)
 		return;
 
 	master = dev_iommu_priv_get(dev);
+	WARN_ON(arm_smmu_master_sva_enabled(master));
 	arm_smmu_detach_dev(master);
 	arm_smmu_disable_pasid(master);
 	kfree(master);
@@ -2479,6 +2491,69 @@ static void arm_smmu_get_resv_regions(struct device *dev,
 	iommu_dma_get_resv_regions(dev, head);
 }
 
+static bool arm_smmu_dev_has_feature(struct device *dev,
+				     enum iommu_dev_features feat)
+{
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+	if (!master)
+		return false;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_sva_supported(master);
+	default:
+		return false;
+	}
+}
+
+static bool arm_smmu_dev_feature_enabled(struct device *dev,
+					 enum iommu_dev_features feat)
+{
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+	if (!master)
+		return false;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_sva_enabled(master);
+	default:
+		return false;
+	}
+}
+
+static int arm_smmu_dev_enable_feature(struct device *dev,
+				       enum iommu_dev_features feat)
+{
+	if (!arm_smmu_dev_has_feature(dev, feat))
+		return -ENODEV;
+
+	if (arm_smmu_dev_feature_enabled(dev, feat))
+		return -EBUSY;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_enable_sva(dev_iommu_priv_get(dev));
+	default:
+		return -EINVAL;
+	}
+}
+
+static int arm_smmu_dev_disable_feature(struct device *dev,
+					enum iommu_dev_features feat)
+{
+	if (!arm_smmu_dev_feature_enabled(dev, feat))
+		return -EINVAL;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_SVA:
+		return arm_smmu_master_disable_sva(dev_iommu_priv_get(dev));
+	default:
+		return -EINVAL;
+	}
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -2497,6 +2572,10 @@ static struct iommu_ops arm_smmu_ops = {
 	.of_xlate		= arm_smmu_of_xlate,
 	.get_resv_regions	= arm_smmu_get_resv_regions,
 	.put_resv_regions	= generic_iommu_put_resv_regions,
+	.dev_has_feat		= arm_smmu_dev_has_feature,
+	.dev_feat_enabled	= arm_smmu_dev_feature_enabled,
+	.dev_enable_feat	= arm_smmu_dev_enable_feature,
+	.dev_disable_feat	= arm_smmu_dev_disable_feature,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
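For context, client drivers do not call the new arm_smmu_dev_* callbacks directly: they are reached through the generic feature API declared in include/linux/iommu.h (iommu_dev_has_feature() and friends, as they exist around this kernel generation). The following is a minimal sketch of that call path from a hypothetical consumer; "my_dev" and the function name are illustrative and not part of this series, and with this patch alone the has-feature check still reports SVA as unsupported.

#include <linux/iommu.h>

/* Illustrative consumer: probe and enable SVA through the generic API. */
static int my_driver_init_sva(struct device *my_dev)
{
	int ret;

	/* Dispatches to arm_smmu_dev_has_feature() for SMMUv3 masters */
	if (!iommu_dev_has_feature(my_dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;	/* always taken until IOPF and BTM are advertised */

	/* Dispatches to arm_smmu_dev_enable_feature() */
	ret = iommu_dev_enable_feature(my_dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	/* Dispatches to arm_smmu_dev_feature_enabled() */
	if (!iommu_dev_feature_enabled(my_dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;

	return 0;
}

The matching iommu_dev_disable_feature() call lands in arm_smmu_master_disable_sva(), which returns -EBUSY while any bond remains on master->bonds.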
Implement the IOMMU device feature callbacks to support the SVA feature.
At the moment dev_has_feat() returns false since I/O Page Faults isn't
yet implemented.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 26 ++++++
 .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 49 ++++++++++++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 79 +++++++++++++++++++
 3 files changed, 154 insertions(+)
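Once dev_has_feat() can return true (with IOPF support and, per the review above, BTM advertised), the intended consumer flow is the SVA bind API provided by the rest of the series. Below is a rough sketch under that assumption, using the iommu_sva_bind_device() prototype of this kernel generation; "my_dev", my_driver_bind_mm() and the error handling are hypothetical and only illustrate the ordering of feature enable, bind and unbind.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical driver binding a process address space. With this patch
 * alone, enabling IOMMU_DEV_FEAT_SVA still fails with -ENODEV.
 */
static int my_driver_bind_mm(struct device *my_dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	u32 pasid;
	int ret;

	ret = iommu_dev_enable_feature(my_dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	handle = iommu_sva_bind_device(my_dev, mm, NULL);
	if (IS_ERR(handle)) {
		iommu_dev_disable_feature(my_dev, IOMMU_DEV_FEAT_SVA);
		return PTR_ERR(handle);
	}

	/* PASID (SSID) with which the device tags its DMA for this mm */
	pasid = iommu_sva_get_pasid(handle);
	dev_dbg(my_dev, "bound mm, pasid %u\n", pasid);

	/* ... program pasid into the device and run the workload ... */

	iommu_sva_unbind_device(handle);
	iommu_dev_disable_feature(my_dev, IOMMU_DEV_FEAT_SVA);
	return 0;
}

The enable/disable feature calls bracket the bind so that arm_smmu_attach_dev() keeps rejecting domain changes while the device may be bound to an mm, which is exactly the check this patch adds.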