@@ -126,12 +126,10 @@ extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
-extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
-#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
extern atomic_t __num_online_cpus;
@@ -1035,15 +1033,6 @@ set_cpu_active(unsigned int cpu, bool active)
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}
-static inline void
-set_cpu_dying(unsigned int cpu, bool dying)
-{
- if (dying)
- cpumask_set_cpu(cpu, &__cpu_dying_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_dying_mask);
-}
-
/**
* to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
@@ -1119,11 +1108,6 @@ static inline bool cpu_active(unsigned int cpu)
return cpumask_test_cpu(cpu, cpu_active_mask);
}
-static inline bool cpu_dying(unsigned int cpu)
-{
- return cpumask_test_cpu(cpu, cpu_dying_mask);
-}
-
#else
#define num_online_cpus() 1U
@@ -1151,11 +1135,6 @@ static inline bool cpu_active(unsigned int cpu)
return cpu == 0;
}
-static inline bool cpu_dying(unsigned int cpu)
-{
- return false;
-}
-
#endif /* NR_CPUS > 1 */
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
@@ -54,6 +54,9 @@
* @rollback: Perform a rollback
* @single: Single callback invocation
* @bringup: Single callback bringup or teardown selector
+ * @goes_down: Indicator for the direction of cpu_up()/cpu_down()
+ * operations, including any rollbacks. Not affected by state or
+ * instance add/remove operations. See cpuhp_cpu_goes_down().
* @cpu: CPU number
* @node: Remote CPU node; for multi-instance, do a
* single entry callback for install/remove
@@ -74,6 +77,7 @@ struct cpuhp_cpu_state {
bool rollback;
bool single;
bool bringup;
+ bool goes_down;
struct hlist_node *node;
struct hlist_node *last;
enum cpuhp_state cb_state;
@@ -474,6 +478,37 @@ void cpu_maps_update_done(void)
mutex_unlock(&cpu_add_remove_lock);
}
+/**
+ * cpuhp_cpu_goes_down - Query the current/last CPU hotplug direction of a CPU
+ * @cpu: The CPU to query
+ *
+ * The direction indicator is modified by the hotplug core on
+ * cpu_up()/cpu_down() operations, including any rollback operations.
+ * The indicator is not affected by state or instance install/remove
+ * operations.
+ *
+ * The indicator is sticky after the hotplug operation completes, whether
+ * the operation was a full up/down or just a partial bringup/teardown.
+ *
+ *                          goes_down
+ * cpu_up(target)    enter  -> False
+ *     rollback on fail     -> True
+ * cpu_up(target)    exit      Last state
+ *
+ * cpu_down(target)  enter  -> True
+ *     rollback on fail     -> False
+ * cpu_down(target)  exit      Last state
+ *
+ * The return value is a racy snapshot and is not protected against
+ * concurrent CPU hotplug operations which modify the indicator.
+ *
+ * Returns: True if the cached direction is down, false otherwise
+ */
+bool cpuhp_cpu_goes_down(unsigned int cpu)
+{
+ return data_race(per_cpu(cpuhp_state.goes_down, cpu));
+}
+
/*
* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
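
The scheduler hunks further down are the in-tree consumers of this helper. As a minimal sketch of the intended usage pattern outside the hotplug core, something like the following would steer new work away from a CPU that is on its way down. This is hypothetical and not part of the patch: example_cpu_usable() is a made-up name, and since cpuhp_cpu_goes_down() is declared in kernel/smpboot.h rather than a public header, only core kernel code which includes that header could call it.

/*
 * Hypothetical sketch, not part of this patch: prefer CPUs which are
 * not on their way down when placing new work.
 */
static bool example_cpu_usable(unsigned int cpu)
{
	/*
	 * cpuhp_cpu_goes_down() returns a racy snapshot; a hotplug
	 * operation can begin right after the check, so callers must
	 * still cope with work being migrated off the CPU later.
	 */
	return cpu_online(cpu) && !cpuhp_cpu_goes_down(cpu);
}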
@@ -708,8 +743,7 @@ cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
st->target = target;
st->single = false;
st->bringup = bringup;
- if (cpu_dying(cpu) != !bringup)
- set_cpu_dying(cpu, !bringup);
+ st->goes_down = !bringup;
return prev_state;
}
@@ -743,8 +777,7 @@ cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
}
st->bringup = bringup;
- if (cpu_dying(cpu) != !bringup)
- set_cpu_dying(cpu, !bringup);
+ st->goes_down = !bringup;
}
/* Regular hotplug invocation of the AP hotplug thread */
@@ -3127,8 +3160,6 @@ EXPORT_SYMBOL(__cpu_present_mask);
struct cpumask __cpu_active_mask __read_mostly;
-struct cpumask __cpu_dying_mask __read_mostly;
-
atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);
@@ -2468,7 +2468,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
return cpu_online(cpu);
/* Regular kernel threads don't get to stay during offline. */
- if (cpu_dying(cpu))
+ if (cpuhp_cpu_goes_down(cpu))
return false;
/* But are allowed during online. */
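
For context, the surrounding helper in kernel/sched/core.c reads roughly as follows after this change. This is a reconstruction, not part of the patch; the exact set of checks varies between kernel versions:

static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	/* When not in the task's cpumask, no point in looking further. */
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	/* migrate_disabled() tasks must be allowed to finish on their CPU. */
	if (is_migration_disabled(p))
		return cpu_online(cpu);

	/* Non kernel threads are not allowed during either online or offline. */
	if (!(p->flags & PF_KTHREAD))
		return cpu_active(cpu) && task_cpu_possible(cpu, p);

	/* KTHREAD_IS_PER_CPU is always allowed on an online CPU. */
	if (kthread_is_per_cpu(p))
		return cpu_online(cpu);

	/* Regular kernel threads don't get to stay during offline. */
	if (cpuhp_cpu_goes_down(cpu))
		return false;

	/* But are allowed during online. */
	return cpu_online(cpu);
}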
@@ -9434,7 +9434,7 @@ static void balance_push(struct rq *rq)
* Only active while going offline and when invoked on the outgoing
* CPU.
*/
- if (!cpu_dying(rq->cpu) || rq != this_rq())
+ if (!cpuhp_cpu_goes_down(rq->cpu) || rq != this_rq())
return;
/*
@@ -20,4 +20,6 @@ int smpboot_unpark_threads(unsigned int cpu);
void __init cpuhp_threads_init(void);
+bool cpuhp_cpu_goes_down(unsigned int cpu);
+
#endif