
[RFC,02/16] padata: Return first error from a job

Message ID 20220106004656.126790-3-daniel.m.jordan@oracle.com (mailing list archive)
State New
Series padata, vfio, sched: Multithreaded VFIO page pinning

Commit Message

Daniel Jordan Jan. 6, 2022, 12:46 a.m. UTC
The only current user of multithreaded jobs, deferred struct page init,
can't fail, but soon the world won't be perfect anymore.  Return the
first error encountered during a job.

Threads can fail for different reasons, which may need special handling
in the future, but returning the first error will do for the upcoming new
user because the kernel unwinds the same way no matter which error is hit.

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
---
 include/linux/padata.h |  5 +++--
 kernel/padata.c        | 22 ++++++++++++++++------
 mm/page_alloc.c        |  4 +++-
 3 files changed, 22 insertions(+), 9 deletions(-)

Patch

diff --git a/include/linux/padata.h b/include/linux/padata.h
index 0dc031d54742..1c8670a24ccf 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -126,6 +126,7 @@  struct padata_shell {
  * struct padata_mt_job - represents one multithreaded job
  *
  * @thread_fn: Called for each chunk of work that a padata thread does.
+ *             Returns 0 or a client-specific nonzero error code.
  * @fn_arg: The thread function argument.
  * @start: The start of the job (units are job-specific).
  * @size: size of this node's work (units are job-specific).
@@ -138,7 +139,7 @@  struct padata_shell {
  *               depending on task size and minimum chunk size.
  */
 struct padata_mt_job {
-	void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
+	int (*thread_fn)(unsigned long start, unsigned long end, void *arg);
 	void			*fn_arg;
 	unsigned long		start;
 	unsigned long		size;
@@ -188,7 +189,7 @@  extern void padata_free_shell(struct padata_shell *ps);
 extern int padata_do_parallel(struct padata_shell *ps,
 			      struct padata_priv *padata, int *cb_cpu);
 extern void padata_do_serial(struct padata_priv *padata);
-extern void padata_do_multithreaded(struct padata_mt_job *job);
+extern int padata_do_multithreaded(struct padata_mt_job *job);
 extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 			      cpumask_var_t cpumask);
 #endif
diff --git a/kernel/padata.c b/kernel/padata.c
index 5d13920d2a12..1596ca22b316 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -54,6 +54,7 @@  struct padata_mt_job_state {
 	struct padata_mt_job	*job;
 	int			nworks;
 	int			nworks_fini;
+	int			error; /* first error from thread_fn */
 	unsigned long		chunk_size;
 };
 
@@ -446,8 +447,9 @@  static void padata_mt_helper(struct work_struct *w)
 
 	spin_lock(&ps->lock);
 
-	while (job->size > 0) {
+	while (job->size > 0 && ps->error == 0) {
 		unsigned long start, size, end;
+		int ret;
 
 		start = job->start;
 		/* So end is chunk size aligned if enough work remains. */
@@ -459,8 +461,12 @@  static void padata_mt_helper(struct work_struct *w)
 		job->size -= size;
 
 		spin_unlock(&ps->lock);
-		job->thread_fn(start, end, job->fn_arg);
+		ret = job->thread_fn(start, end, job->fn_arg);
 		spin_lock(&ps->lock);
+
+		/* Save first error code only. */
+		if (ps->error == 0)
+			ps->error = ret;
 	}
 
 	++ps->nworks_fini;
@@ -476,8 +482,10 @@  static void padata_mt_helper(struct work_struct *w)
  * @job: Description of the job.
  *
  * See the definition of struct padata_mt_job for more details.
+ *
+ * Return: 0 or a client-specific nonzero error code.
  */
-void padata_do_multithreaded(struct padata_mt_job *job)
+int padata_do_multithreaded(struct padata_mt_job *job)
 {
 	/* In case threads finish at different times. */
 	static const unsigned long load_balance_factor = 4;
@@ -487,7 +495,7 @@  void padata_do_multithreaded(struct padata_mt_job *job)
 	int nworks;
 
 	if (job->size == 0)
-		return;
+		return 0;
 
 	/* Ensure at least one thread when size < min_chunk. */
 	nworks = max(job->size / job->min_chunk, 1ul);
@@ -495,8 +503,8 @@  void padata_do_multithreaded(struct padata_mt_job *job)
 
 	if (nworks == 1) {
 		/* Single thread, no coordination needed, cut to the chase. */
-		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
-		return;
+		return job->thread_fn(job->start, job->start + job->size,
+				      job->fn_arg);
 	}
 
 	spin_lock_init(&ps.lock);
@@ -504,6 +512,7 @@  void padata_do_multithreaded(struct padata_mt_job *job)
 	ps.job	       = job;
 	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
 	ps.nworks_fini = 0;
+	ps.error       = 0;
 
 	/*
 	 * Chunk size is the amount of work a helper does per call to the
@@ -527,6 +536,7 @@  void padata_do_multithreaded(struct padata_mt_job *job)
 
 	destroy_work_on_stack(&my_work.pw_work);
 	padata_works_free(&works);
+	return ps.error;
 }
 
 static void __padata_list_init(struct padata_list *pd_list)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eeb3a9cb36bb..039786d840cf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2018,7 +2018,7 @@  deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
 	return nr_pages;
 }
 
-static void __init
+static int __init
 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
 			   void *arg)
 {
@@ -2036,6 +2036,8 @@  deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
 		cond_resched();
 	}
+
+	return 0;
 }
 
 /* An arch may override for more concurrency. */