
[3/4] workqueue: add schedule_on_each_cpumask helper

Message ID 20230530145335.930262644@redhat.com (mailing list archive)
State New
Series vmstat bug fixes for nohz_full CPUs

Commit Message

Marcelo Tosatti May 30, 2023, 2:52 p.m. UTC
Add a schedule_on_each_cpumask function, equivalent to
schedule_on_each_cpu but accepting a cpumask to operate on.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

---
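For illustration only (not part of the posted series), a caller could use the new helper roughly as follows; my_per_cpu_sync() and sync_some_cpus() are hypothetical names:

	static void my_per_cpu_sync(struct work_struct *work)
	{
		/* Runs once on each online CPU of the mask, in system
		 * workqueue (process) context. */
	}

	static int sync_some_cpus(cpumask_t *mask)
	{
		/* Blocks until my_per_cpu_sync() has completed on every
		 * online CPU in the mask; returns 0 or -errno. */
		return schedule_on_each_cpumask(my_per_cpu_sync, mask);
	}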

Comments

Andrew Morton May 30, 2023, 8:09 p.m. UTC | #1
On Tue, 30 May 2023 11:52:37 -0300 Marcelo Tosatti <mtosatti@redhat.com> wrote:

> Add a schedule_on_each_cpumask function, equivalent to
> schedule_on_each_cpu but accepting a cpumask to operate on.
> 
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> 
> ---
> 
> Index: linux-vmstat-remote/kernel/workqueue.c
> ===================================================================
> --- linux-vmstat-remote.orig/kernel/workqueue.c
> +++ linux-vmstat-remote/kernel/workqueue.c
> @@ -3455,6 +3455,56 @@ int schedule_on_each_cpu(work_func_t fun
>  	return 0;
>  }
>  
> +
> +/**
> + * schedule_on_each_cpumask - execute a function synchronously on each
> + * CPU in @cpumask, for those which are online.
> + *
> + * @func: the function to call
> + * @cpumask: the CPUs on which to call the function
> + *
> + * schedule_on_each_cpumask() executes @func on each CPU in @cpumask that is
> + * online, using the system workqueue, and blocks until all such CPUs have
> + * completed.  schedule_on_each_cpumask() is very slow.
> + *
> + * Return:
> + * 0 on success, -errno on failure.
> + */
> +int schedule_on_each_cpumask(work_func_t func, cpumask_t *cpumask)
> +{
> +	int cpu;
> +	struct work_struct __percpu *works;
> +	cpumask_var_t effmask;
> +
> +	works = alloc_percpu(struct work_struct);
> +	if (!works)
> +		return -ENOMEM;
> +
> +	if (!alloc_cpumask_var(&effmask, GFP_KERNEL)) {
> +		free_percpu(works);
> +		return -ENOMEM;
> +	}
> +
> +	cpumask_and(effmask, cpumask, cpu_online_mask);
> +
> +	cpus_read_lock();
> +
> +	for_each_cpu(cpu, effmask) {

Should we check here that the cpu is still online?

> +		struct work_struct *work = per_cpu_ptr(works, cpu);
> +
> +		INIT_WORK(work, func);
> +		schedule_work_on(cpu, work);
> +	}
> +
> +	for_each_cpu(cpu, effmask)
> +		flush_work(per_cpu_ptr(works, cpu));
> +
> +	cpus_read_unlock();
> +	free_percpu(works);
> +	free_cpumask_var(effmask);
> +	return 0;
> +}
> +
>  /**
>   * execute_in_process_context - reliably execute the routine with user context
>   * @fn:		the function to execute
> --- linux-vmstat-remote.orig/include/linux/workqueue.h
> +++ linux-vmstat-remote/include/linux/workqueue.h
> @@ -450,6 +450,7 @@ extern void __flush_workqueue(struct wor
>  extern void drain_workqueue(struct workqueue_struct *wq);
>  
>  extern int schedule_on_each_cpu(work_func_t func);
> +extern int schedule_on_each_cpumask(work_func_t func, cpumask_t *cpumask);

May as well make schedule_on_each_cpu() call
schedule_on_each_cpumask()?  Save a bit of text, and they're hardly
performance-critical to that extent.
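
As an illustrative sketch only (not the posted patch), both suggestions above could be folded in as below: the effective mask is computed after cpus_read_lock() so the online mask cannot change underneath the loops, and schedule_on_each_cpu() becomes a thin wrapper over the cpumask variant. The sketch assumes the mask argument is const-qualified (const struct cpumask *) so that cpu_possible_mask can be passed directly:

	int schedule_on_each_cpumask(work_func_t func, const struct cpumask *cpumask)
	{
		int cpu;
		struct work_struct __percpu *works;
		cpumask_var_t effmask;

		works = alloc_percpu(struct work_struct);
		if (!works)
			return -ENOMEM;

		if (!alloc_cpumask_var(&effmask, GFP_KERNEL)) {
			free_percpu(works);
			return -ENOMEM;
		}

		cpus_read_lock();
		/* With the hotplug lock held, cpu_online_mask is stable,
		 * so every CPU in effmask stays online for both loops. */
		cpumask_and(effmask, cpumask, cpu_online_mask);

		for_each_cpu(cpu, effmask) {
			struct work_struct *work = per_cpu_ptr(works, cpu);

			INIT_WORK(work, func);
			schedule_work_on(cpu, work);
		}

		for_each_cpu(cpu, effmask)
			flush_work(per_cpu_ptr(works, cpu));

		cpus_read_unlock();
		free_percpu(works);
		free_cpumask_var(effmask);
		return 0;
	}

	int schedule_on_each_cpu(work_func_t func)
	{
		/* ANDed with cpu_online_mask above, so this keeps the
		 * existing for_each_online_cpu() behaviour. */
		return schedule_on_each_cpumask(func, cpu_possible_mask);
	}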

Patch

Index: linux-vmstat-remote/kernel/workqueue.c
===================================================================
--- linux-vmstat-remote.orig/kernel/workqueue.c
+++ linux-vmstat-remote/kernel/workqueue.c
@@ -3455,6 +3455,56 @@  int schedule_on_each_cpu(work_func_t fun
 	return 0;
 }
 
+
+/**
+ * schedule_on_each_cpumask - execute a function synchronously on each
+ * CPU in @cpumask, for those which are online.
+ *
+ * @func: the function to call
+ * @cpumask: the CPUs on which to call the function
+ *
+ * schedule_on_each_cpumask() executes @func on each CPU in @cpumask that is
+ * online, using the system workqueue, and blocks until all such CPUs have
+ * completed.  schedule_on_each_cpumask() is very slow.
+ *
+ * Return:
+ * 0 on success, -errno on failure.
+ */
+int schedule_on_each_cpumask(work_func_t func, cpumask_t *cpumask)
+{
+	int cpu;
+	struct work_struct __percpu *works;
+	cpumask_var_t effmask;
+
+	works = alloc_percpu(struct work_struct);
+	if (!works)
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&effmask, GFP_KERNEL)) {
+		free_percpu(works);
+		return -ENOMEM;
+	}
+
+	cpumask_and(effmask, cpumask, cpu_online_mask);
+
+	cpus_read_lock();
+
+	for_each_cpu(cpu, effmask) {
+		struct work_struct *work = per_cpu_ptr(works, cpu);
+
+		INIT_WORK(work, func);
+		schedule_work_on(cpu, work);
+	}
+
+	for_each_cpu(cpu, effmask)
+		flush_work(per_cpu_ptr(works, cpu));
+
+	cpus_read_unlock();
+	free_percpu(works);
+	free_cpumask_var(effmask);
+	return 0;
+}
+
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:		the function to execute
Index: linux-vmstat-remote/include/linux/workqueue.h
===================================================================
--- linux-vmstat-remote.orig/include/linux/workqueue.h
+++ linux-vmstat-remote/include/linux/workqueue.h
@@ -450,6 +450,7 @@  extern void __flush_workqueue(struct wor
 extern void drain_workqueue(struct workqueue_struct *wq);
 
 extern int schedule_on_each_cpu(work_func_t func);
+extern int schedule_on_each_cpumask(work_func_t func, cpumask_t *cpumask);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);