Message ID | 6-v1-720585788a7d+811b-iommu_fwspec_p1_jgg@nvidia.com (mailing list archive)
---|---
State | Handled Elsewhere, archived
Series | IOMMU related FW parsing cleanup
On 29/11/2023 12:48 am, Jason Gunthorpe wrote:
> The iommu_device_lock protects the iommu_device_list which is only read by
> iommu_ops_from_fwnode().
>
> This is now always called under the iommu_probe_device_lock, so we don't
> need to double lock the linked list. Use the iommu_probe_device_lock on
> the write side too.

Please no, iommu_probe_device_lock() is a hack and we need to remove the
*reason* it exists at all. And IMO just because iommu_present() is
deprecated doesn't justify making it look utterly nonsensical - in no way
does that have any relationship with probe_device, much less need to
serialise against it!

Thanks,
Robin.

> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>  drivers/iommu/iommu.c | 30 +++++++++++++-----------------
>  1 file changed, 13 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
> index 08f29a1dfcd5f8..9557c2ec08d915 100644
> --- a/drivers/iommu/iommu.c
> +++ b/drivers/iommu/iommu.c
> @@ -146,7 +146,6 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
>          container_of(_kobj, struct iommu_group, kobj)
>
>  static LIST_HEAD(iommu_device_list);
> -static DEFINE_SPINLOCK(iommu_device_lock);
>
>  static const struct bus_type * const iommu_buses[] = {
>          &platform_bus_type,
> @@ -262,9 +261,9 @@ int iommu_device_register(struct iommu_device *iommu,
>          if (hwdev)
>                  iommu->fwnode = dev_fwnode(hwdev);
>
> -        spin_lock(&iommu_device_lock);
> +        mutex_lock(&iommu_probe_device_lock);
>          list_add_tail(&iommu->list, &iommu_device_list);
> -        spin_unlock(&iommu_device_lock);
> +        mutex_unlock(&iommu_probe_device_lock);
>
>          for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
>                  err = bus_iommu_probe(iommu_buses[i]);
> @@ -279,9 +278,9 @@ void iommu_device_unregister(struct iommu_device *iommu)
>          for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
>                  bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
>
> -        spin_lock(&iommu_device_lock);
> +        mutex_lock(&iommu_probe_device_lock);
>          list_del(&iommu->list);
> -        spin_unlock(&iommu_device_lock);
> +        mutex_unlock(&iommu_probe_device_lock);
>
>          /* Pairs with the alloc in generic_single_device_group() */
>          iommu_group_put(iommu->singleton_group);
> @@ -316,9 +315,9 @@ int iommu_device_register_bus(struct iommu_device *iommu,
>          if (err)
>                  return err;
>
> -        spin_lock(&iommu_device_lock);
> +        mutex_lock(&iommu_probe_device_lock);
>          list_add_tail(&iommu->list, &iommu_device_list);
> -        spin_unlock(&iommu_device_lock);
> +        mutex_unlock(&iommu_probe_device_lock);
>
>          err = bus_iommu_probe(bus);
>          if (err) {
> @@ -2033,9 +2032,9 @@ bool iommu_present(const struct bus_type *bus)
>
>          for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
>                  if (iommu_buses[i] == bus) {
> -                        spin_lock(&iommu_device_lock);
> +                        mutex_lock(&iommu_probe_device_lock);
>                          ret = !list_empty(&iommu_device_list);
> -                        spin_unlock(&iommu_device_lock);
> +                        mutex_unlock(&iommu_probe_device_lock);
>                  }
>          }
>          return ret;
> @@ -2980,17 +2979,14 @@ EXPORT_SYMBOL_GPL(iommu_default_passthrough);
>
>  const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
>  {
> -        const struct iommu_ops *ops = NULL;
>          struct iommu_device *iommu;
>
> -        spin_lock(&iommu_device_lock);
> +        lockdep_assert_held(&iommu_probe_device_lock);
> +
>          list_for_each_entry(iommu, &iommu_device_list, list)
> -                if (iommu->fwnode == fwnode) {
> -                        ops = iommu->ops;
> -                        break;
> -                }
> -        spin_unlock(&iommu_device_lock);
> -        return ops;
> +                if (iommu->fwnode == fwnode)
> +                        return iommu->ops;
> +        return NULL;
>  }
>
>  int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
On Wed, Nov 29, 2023 at 05:58:08PM +0000, Robin Murphy wrote:
> On 29/11/2023 12:48 am, Jason Gunthorpe wrote:
> > The iommu_device_lock protects the iommu_device_list which is only read by
> > iommu_ops_from_fwnode().
> >
> > This is now always called under the iommu_probe_device_lock, so we don't
> > need to double lock the linked list. Use the iommu_probe_device_lock on
> > the write side too.
>
> Please no, iommu_probe_device_lock() is a hack and we need to remove the
> *reason* it exists at all.

Yes, I agree that goal is good. However, it is doing a lot of things;
removing it is not so easy.

One thing it is quietly doing is keeping the ops and iommu_device pointers
alive during the entire probe process against (deeply broken, but whatever)
concurrent iommu driver removal.

It is also protecting access to dev->iommu_group during the group formation
process.

So, it is a little more complex. My specific interest was to make it not a
spinlock.

> And IMO just because iommu_present() is
> deprecated doesn't justify making it look utterly nonsensical - in no way
> does that have any relationship with probe_device, much less need to
> serialise against it!

The naming is poor now, I agree, but it is not nonsensical since it still
holds the correct lock for the data it is accessing.

Thanks,
Jason
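For readers following the locking argument, here is a rough sketch of the probe-time sequence Jason is describing. This is not the real drivers/iommu/iommu.c code; the example_* helpers are hypothetical stand-ins for the driver probe and group-formation steps.

/*
 * Illustrative sketch only, not the actual kernel code. It shows why the
 * mutex is more than a list lock: it is held across the whole per-device
 * probe, so the iommu_device/ops being used cannot be unregistered
 * mid-probe, and dev->iommu_group is only created and published while the
 * lock is held.
 */
static int example_probe_device(struct device *dev, const struct iommu_ops *ops)
{
        struct iommu_group *group;
        int ret;

        mutex_lock(&iommu_probe_device_lock);

        ret = example_driver_probe(dev, ops);   /* hypothetical: ops->probe_device() step */
        if (ret)
                goto out_unlock;

        group = example_group_for_dev(dev);     /* hypothetical: group formation */
        if (IS_ERR(group)) {
                ret = PTR_ERR(group);
                goto out_unlock;
        }
        dev->iommu_group = group;               /* only touched under the lock */
        ret = 0;

out_unlock:
        mutex_unlock(&iommu_probe_device_lock);
        return ret;
}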
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 08f29a1dfcd5f8..9557c2ec08d915 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -146,7 +146,6 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
         container_of(_kobj, struct iommu_group, kobj)

 static LIST_HEAD(iommu_device_list);
-static DEFINE_SPINLOCK(iommu_device_lock);

 static const struct bus_type * const iommu_buses[] = {
         &platform_bus_type,
@@ -262,9 +261,9 @@ int iommu_device_register(struct iommu_device *iommu,
         if (hwdev)
                 iommu->fwnode = dev_fwnode(hwdev);

-        spin_lock(&iommu_device_lock);
+        mutex_lock(&iommu_probe_device_lock);
         list_add_tail(&iommu->list, &iommu_device_list);
-        spin_unlock(&iommu_device_lock);
+        mutex_unlock(&iommu_probe_device_lock);

         for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
                 err = bus_iommu_probe(iommu_buses[i]);
@@ -279,9 +278,9 @@ void iommu_device_unregister(struct iommu_device *iommu)
         for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
                 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

-        spin_lock(&iommu_device_lock);
+        mutex_lock(&iommu_probe_device_lock);
         list_del(&iommu->list);
-        spin_unlock(&iommu_device_lock);
+        mutex_unlock(&iommu_probe_device_lock);

         /* Pairs with the alloc in generic_single_device_group() */
         iommu_group_put(iommu->singleton_group);
@@ -316,9 +315,9 @@ int iommu_device_register_bus(struct iommu_device *iommu,
         if (err)
                 return err;

-        spin_lock(&iommu_device_lock);
+        mutex_lock(&iommu_probe_device_lock);
         list_add_tail(&iommu->list, &iommu_device_list);
-        spin_unlock(&iommu_device_lock);
+        mutex_unlock(&iommu_probe_device_lock);

         err = bus_iommu_probe(bus);
         if (err) {
@@ -2033,9 +2032,9 @@ bool iommu_present(const struct bus_type *bus)

         for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
                 if (iommu_buses[i] == bus) {
-                        spin_lock(&iommu_device_lock);
+                        mutex_lock(&iommu_probe_device_lock);
                         ret = !list_empty(&iommu_device_list);
-                        spin_unlock(&iommu_device_lock);
+                        mutex_unlock(&iommu_probe_device_lock);
                 }
         }
         return ret;
@@ -2980,17 +2979,14 @@ EXPORT_SYMBOL_GPL(iommu_default_passthrough);

 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
-        const struct iommu_ops *ops = NULL;
         struct iommu_device *iommu;

-        spin_lock(&iommu_device_lock);
+        lockdep_assert_held(&iommu_probe_device_lock);
+
         list_for_each_entry(iommu, &iommu_device_list, list)
-                if (iommu->fwnode == fwnode) {
-                        ops = iommu->ops;
-                        break;
-                }
-        spin_unlock(&iommu_device_lock);
-        return ops;
+                if (iommu->fwnode == fwnode)
+                        return iommu->ops;
+        return NULL;
 }

 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
The iommu_device_lock protects the iommu_device_list which is only read by
iommu_ops_from_fwnode().

This is now always called under the iommu_probe_device_lock, so we don't
need to double lock the linked list. Use the iommu_probe_device_lock on
the write side too.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/iommu.c | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)
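A minimal sketch of the read-side contract that results from this patch, assuming a firmware-parsing caller. The caller name and the example_init_fwspec() helper are hypothetical; only iommu_ops_from_fwnode() and the lock come from the patch itself.

/*
 * Sketch only, not actual kernel code. iommu_ops_from_fwnode() no longer
 * takes a lock of its own; the caller must already hold
 * iommu_probe_device_lock, which lockdep_assert_held() verifies, and should
 * keep holding it while the returned ops is used, since
 * iommu_device_unregister() removes list entries under the same mutex.
 */
static int example_fw_configure(struct device *dev,
                                struct fwnode_handle *iommu_fwnode)
{
        const struct iommu_ops *ops;
        int ret = -ENODEV;

        mutex_lock(&iommu_probe_device_lock);
        ops = iommu_ops_from_fwnode(iommu_fwnode);
        if (ops)
                ret = example_init_fwspec(dev, iommu_fwnode, ops); /* hypothetical helper */
        mutex_unlock(&iommu_probe_device_lock);

        return ret;
}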