--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -499,7 +499,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
+int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev, flush);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -1097,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev, 1);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -648,7 +648,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
+int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -659,8 +659,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
cancel_delayed_work(&priv->mcast_task);
mutex_unlock(&mcast_mutex);
- if (flush)
- flush_workqueue(priv->wq);
+ flush_workqueue(priv->wq);
return 0;
}
@@ -838,8 +837,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- ipoib_mcast_stop_thread(dev, 0);
-
local_irq_save(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -936,13 +933,10 @@ void ipoib_mcast_restart_task(struct work_struct *work)
* We have to cancel outside of the spinlock, but we have to
* take the rtnl lock or else we race with the removal of
* entries from the remove list in mcast_dev_flush as part
- * of ipoib_stop() which will call mcast_stop_thread with
- * flush == 1 while holding the rtnl lock, and the
- * flush_workqueue won't complete until this restart_mcast_task
- * completes. So do like the carrier on task and attempt to
- * take the rtnl lock, but if we can't before the ADMIN_UP flag
- * goes away, then just return and know that the remove list will
- * get flushed later by mcast_stop_thread.
+ * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
+ * to signal that we have hit this particular race, and we
+ * return since we know we don't need to do anything else
+ * anyway.
*/
while (!rtnl_trylock()) {
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
@@ -954,6 +948,9 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
+ /*
+ * Restart our join task if needed
+ */
ipoib_mcast_start_thread(dev);
rtnl_unlock();
}
We used to pass a flush variable to mcast_stop_thread to indicate whether
or not we should flush the workqueue.  This was needed because some code
paths tried to flush the very workqueue they were currently running on,
which is not allowed.  Now that we have per-device workqueues, and now
that ipoib_mcast_restart_task takes advantage of the fact that it is
queued on the same single-thread workqueue as all of the
ipoib_mcast_join_task work items (and therefore no longer needs to stop
the join task while it runs), we can do away with the flush parameter and
flush unconditionally.

Signed-off-by: Doug Ledford <dledford@redhat.com>
---
 drivers/infiniband/ulp/ipoib/ipoib.h           |  2 +-
 drivers/infiniband/ulp/ipoib/ipoib_ib.c        |  4 ++--
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 21 +++++++++------------
 3 files changed, 12 insertions(+), 15 deletions(-)
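
For readers less familiar with the workqueue rules involved, here is a
minimal, hypothetical sketch of the pattern the change relies on (the
demo_* names and the DEMO_MCAST_RUN bit are illustrative only, not the
driver's actual identifiers): with a dedicated per-device workqueue, the
stop path can always flush, because no work item running on that queue
ever calls back into the stop path.

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/bitops.h>

/* Illustrative flag bit, analogous to a "multicast task running" bit. */
#define DEMO_MCAST_RUN  0

struct demo_priv {
        struct workqueue_struct *wq;            /* per-device workqueue */
        struct delayed_work      mcast_task;    /* multicast join/retry work */
        unsigned long            flags;
        struct mutex             mcast_mutex;
};

static int demo_mcast_stop_thread(struct demo_priv *priv)
{
        mutex_lock(&priv->mcast_mutex);
        clear_bit(DEMO_MCAST_RUN, &priv->flags);
        cancel_delayed_work(&priv->mcast_task);
        mutex_unlock(&priv->mcast_mutex);

        /*
         * Flushing unconditionally is safe because this function is only
         * called from contexts outside priv->wq; no work item queued on
         * priv->wq calls back into this stop path, so there is no
         * self-flush deadlock left to guard against.
         */
        flush_workqueue(priv->wq);

        return 0;
}

Flushing a workqueue from a work item running on that same workqueue
would deadlock, which is why the old code needed the flush parameter:
ipoib_mcast_restart_task used to call the stop path with flush == 0 for
exactly that reason.  Once that call is removed, as in this patch, the
special case disappears and the flush can be unconditional.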