@@ -98,6 +98,35 @@
extern struct kmem_cache *lnet_rspt_cachep;
extern struct kmem_cache *lnet_msg_cachep;
+/* Update the cached status of @ni to @status, if it differs.
+ *
+ * Caller must hold ni->ni_lock (enforced by the __must_hold
+ * annotation below).  A NULL ni->ni_status is treated as
+ * "nothing to update".
+ *
+ * Return: true if ns_status actually changed (callers use this
+ * to decide whether a status push to peers is needed), false
+ * otherwise.
+ */
+static inline bool
+lnet_ni_set_status_locked(struct lnet_ni *ni, u32 status)
+__must_hold(&ni->ni_lock)
+{
+	bool update = false;
+
+	if (ni->ni_status && ni->ni_status->ns_status != status) {
+		CDEBUG(D_NET, "ni %s status changed from %#x to %#x\n",
+		       libcfs_nid2str(ni->ni_nid),
+		       ni->ni_status->ns_status, status);
+		ni->ni_status->ns_status = status;
+		update = true;
+	}
+
+	return update;
+}
+
+/* Locking wrapper: take ni->ni_lock, update the cached NI status
+ * via lnet_ni_set_status_locked(), then drop the lock.
+ *
+ * Return: true if the status changed, false otherwise.
+ *
+ * NOTE(review): this takes spin_lock(&ni->ni_lock) directly while
+ * other call sites in this patch use lnet_ni_lock()/lnet_ni_unlock();
+ * confirm the two are equivalent for this lock.
+ */
+static inline bool
+lnet_ni_set_status(struct lnet_ni *ni, u32 status)
+{
+	bool update;
+
+	spin_lock(&ni->ni_lock);
+	update = lnet_ni_set_status_locked(ni, status);
+	spin_unlock(&ni->ni_lock);
+
+	return update;
+}
+
bool lnet_is_route_alive(struct lnet_route *route);
bool lnet_is_gateway_alive(struct lnet_peer *gw);
@@ -4012,11 +4012,7 @@ void lnet_monitor_thr_stop(void)
spin_lock(&ni->ni_net->net_lock);
ni->ni_net->net_last_alive = ktime_get_real_seconds();
spin_unlock(&ni->ni_net->net_lock);
- if (ni->ni_status &&
- ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) {
- ni->ni_status->ns_status = LNET_NI_STATUS_UP;
- push = true;
- }
+ push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP);
lnet_ni_unlock(ni);
}
@@ -1014,15 +1014,9 @@ int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
struct lnet_ni *ni;
bool update = false;
- list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- lnet_ni_lock(ni);
- if (ni->ni_status &&
- ni->ni_status->ns_status != status) {
- ni->ni_status->ns_status = status;
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
+ if (lnet_ni_set_status(ni, status))
update = true;
- }
- lnet_ni_unlock(ni);
- }
return update;
}
@@ -1031,6 +1025,7 @@ int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
lnet_update_ni_status_locked(void)
{
struct lnet_net *net;
+ struct lnet_ni *ni;
bool push = false;
time64_t now;
time64_t timeout;
@@ -1045,13 +1040,13 @@ int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
continue;
if (now < net->net_last_alive + timeout)
- continue;
+ goto check_ni_fatal;
spin_lock(&net->net_lock);
/* re-check with lock */
if (now < net->net_last_alive + timeout) {
spin_unlock(&net->net_lock);
- continue;
+ goto check_ni_fatal;
}
spin_unlock(&net->net_lock);
@@ -1059,7 +1054,25 @@ int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
* timeout on any of its constituent NIs, then mark all
* the NIs down.
*/
- push = lnet_net_set_status_locked(net, LNET_NI_STATUS_DOWN);
+ if (lnet_net_set_status_locked(net, LNET_NI_STATUS_DOWN)) {
+ push = true;
+ continue;
+ }
+
+check_ni_fatal:
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ /* lnet_ni_set_status() will perform the same check of
+ * ni_status while holding the ni lock. We can safely
+ * check ni_status without that lock because it is only
+ * written to under net_lock/EX and our caller is
+ * holding a net lock.
+ */
+ if (atomic_read(&ni->ni_fatal_error_on) &&
+ ni->ni_status &&
+ ni->ni_status->ns_status != LNET_NI_STATUS_DOWN &&
+ lnet_ni_set_status(ni, LNET_NI_STATUS_DOWN))
+ push = true;
+ }
}
return push;