
xen/sched: fix arinc653 to not use variables across cpupools

Message ID 20250313093157.30450-1-jgross@suse.com (mailing list archive)
State New
Series xen/sched: fix arinc653 to not use variables across cpupools

Commit Message

Juergen Gross March 13, 2025, 9:31 a.m. UTC
a653sched_do_schedule() uses two function-local static variables, which
results in bad behavior when more than one cpupool uses the arinc653
scheduler.

Fix that by moving those variables to the scheduler private data.

Fixes: 22787f2e107c ("ARINC 653 scheduler")
Reported-by: Choi Anderson <Anderson.Choi@boeing.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched/arinc653.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)
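
To illustrate the failure mode, here is a minimal standalone sketch (not Xen
code; all names are invented for this example): a function-local static is a
single variable shared by every caller, so two scheduler instances (cpupools)
trample each other's bookkeeping, while state kept in per-instance private
data stays independent.

/*
 * Minimal sketch, not Xen code: shows why a function-local static is
 * shared across all scheduler instances, while state kept in the
 * instance's private data is not.
 */
#include <stdio.h>

struct sched_priv {
    unsigned int sched_index;   /* per-instance state, as in the patch */
};

/* Buggy pattern: one static shared by every caller. */
static unsigned int buggy_next_index(void)
{
    static unsigned int sched_index = 0;

    return sched_index++;
}

/* Fixed pattern: state lives in the instance's private data. */
static unsigned int fixed_next_index(struct sched_priv *priv)
{
    return priv->sched_index++;
}

int main(void)
{
    struct sched_priv pool_a = { 0 }, pool_b = { 0 };

    /* "Pool A" schedules once, then "pool B" schedules once. */
    unsigned int a1 = buggy_next_index();        /* 0 */
    unsigned int b1 = buggy_next_index();        /* 1: B inherits A's position */

    unsigned int a2 = fixed_next_index(&pool_a); /* 0 */
    unsigned int b2 = fixed_next_index(&pool_b); /* 0: independent state */

    printf("buggy: a=%u b=%u\nfixed: a=%u b=%u\n", a1, b1, a2, b2);

    return 0;
}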

Comments

Andrew Cooper March 13, 2025, 11:44 a.m. UTC | #1
On 13/03/2025 9:31 am, Juergen Gross wrote:
> a653sched_do_schedule() uses two function-local static variables, which
> results in bad behavior when more than one cpupool uses the arinc653
> scheduler.
>
> Fix that by moving those variables to the scheduler private data.
>
> Fixes: 22787f2e107c ("ARINC 653 scheduler")
> Reported-by: Choi Anderson <Anderson.Choi@boeing.com>
> Signed-off-by: Juergen Gross <jgross@suse.com>

Oh lovely, those statics are nicely hidden in the local variable list.

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Nathan Studer March 13, 2025, 8:06 p.m. UTC | #2
+Jeff

On 13/03/25 07:44, Andrew Cooper wrote:
> On 13/03/2025 9:31 am, Juergen Gross wrote:
> > a653sched_do_schedule() uses two function-local static variables, which
> > results in bad behavior when more than one cpupool uses the arinc653
> > scheduler.
> >
> > Fix that by moving those variables to the scheduler private data.
> >
> > Fixes: 22787f2e107c ("ARINC 653 scheduler")
> > Reported-by: Choi Anderson <Anderson.Choi@boeing.com>
> > Signed-off-by: Juergen Gross <jgross@suse.com>
> 
> Oh lovely, those statics are nicely hidden in the local variable list.
> 
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

This was one of the issues Jeff fixed in this rejected patch, which we should have split out and submitted separately upstream: https://lists.xenproject.org/archives/html/xen-devel/2020-09/msg01318.html

Acked-by: Nathan Studer <nathan.studer@dornerworks.com>

Patch

diff --git a/xen/common/sched/arinc653.c b/xen/common/sched/arinc653.c
index a82c0d7314..9ebae6d7ae 100644
--- a/xen/common/sched/arinc653.c
+++ b/xen/common/sched/arinc653.c
@@ -143,6 +143,12 @@  typedef struct a653sched_priv_s
      * pointers to all Xen UNIT structures for iterating through
      */
     struct list_head unit_list;
+
+    /**
+     * scheduling housekeeping variables
+     */
+    unsigned int sched_index;
+    s_time_t next_switch_time;
 } a653sched_priv_t;
 
 /**************************************************************************
@@ -513,8 +519,6 @@  a653sched_do_schedule(
     bool tasklet_work_scheduled)
 {
     struct sched_unit *new_task = NULL;
-    static unsigned int sched_index = 0;
-    static s_time_t next_switch_time;
     a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
     const unsigned int cpu = sched_get_resource_cpu(smp_processor_id());
     unsigned long flags;
@@ -528,18 +532,19 @@  a653sched_do_schedule(
         /* time to enter a new major frame
          * the first time this function is called, this will be true */
         /* start with the first domain in the schedule */
-        sched_index = 0;
+        sched_priv->sched_index = 0;
         sched_priv->next_major_frame = now + sched_priv->major_frame;
-        next_switch_time = now + sched_priv->schedule[0].runtime;
+        sched_priv->next_switch_time = now + sched_priv->schedule[0].runtime;
     }
     else
     {
-        while ( (now >= next_switch_time)
-                && (sched_index < sched_priv->num_schedule_entries) )
+        while ( (now >= sched_priv->next_switch_time) &&
+                (sched_priv->sched_index < sched_priv->num_schedule_entries) )
         {
             /* time to switch to the next domain in this major frame */
-            sched_index++;
-            next_switch_time += sched_priv->schedule[sched_index].runtime;
+            sched_priv->sched_index++;
+            sched_priv->next_switch_time +=
+                sched_priv->schedule[sched_priv->sched_index].runtime;
         }
     }
 
@@ -547,8 +552,8 @@  a653sched_do_schedule(
      * If we exhausted the domains in the schedule and still have time left
      * in the major frame then switch next at the next major frame.
      */
-    if ( sched_index >= sched_priv->num_schedule_entries )
-        next_switch_time = sched_priv->next_major_frame;
+    if ( sched_priv->sched_index >= sched_priv->num_schedule_entries )
+        sched_priv->next_switch_time = sched_priv->next_major_frame;
 
     /*
      * If there are more domains to run in the current major frame, set
@@ -556,8 +561,8 @@  a653sched_do_schedule(
      * Otherwise, set new_task equal to the address of the idle task's
      * sched_unit structure.
      */
-    new_task = (sched_index < sched_priv->num_schedule_entries)
-        ? sched_priv->schedule[sched_index].unit
+    new_task = (sched_priv->sched_index < sched_priv->num_schedule_entries)
+        ? sched_priv->schedule[sched_priv->sched_index].unit
         : IDLETASK(cpu);
 
     /* Check to see if the new task can be run (awake & runnable). */
@@ -589,7 +594,7 @@  a653sched_do_schedule(
      * Return the amount of time the next domain has to run and the address
      * of the selected task's UNIT structure.
      */
-    prev->next_time = next_switch_time - now;
+    prev->next_time = sched_priv->next_switch_time - now;
     prev->next_task = new_task;
     new_task->migrated = false;
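
For reference, the following standalone sketch walks the post-patch stepping
logic through one major frame. The simplified types, the abstract time units,
and the absence of locking are all assumptions for illustration; the control
flow mirrors the a653sched_do_schedule() hunks above. With three 10-tick
entries in a 30-tick major frame, sched_index and next_switch_time advance
entry by entry and reset when the next major frame begins.

/*
 * Standalone sketch of the major-frame stepping the patch moves into
 * a653sched_priv_t. Simplified types and abstract time units are
 * assumptions for illustration only.
 */
#include <stdio.h>

typedef long long s_time_t;     /* stand-in for Xen's s_time_t */

struct entry {
    s_time_t runtime;
};

struct priv {
    struct entry schedule[4];   /* spare zero slot: the while loop reads
                                   schedule[sched_index] after the final
                                   increment */
    unsigned int num_schedule_entries;
    unsigned int sched_index;   /* per-pool state, as in the patch */
    s_time_t next_switch_time;
    s_time_t next_major_frame;
    s_time_t major_frame;
};

static void step(struct priv *p, s_time_t now)
{
    if ( now >= p->next_major_frame )
    {
        /* Enter a new major frame: restart at the first entry. */
        p->sched_index = 0;
        p->next_major_frame = now + p->major_frame;
        p->next_switch_time = now + p->schedule[0].runtime;
    }
    else
    {
        /* Advance past every entry whose window has already elapsed. */
        while ( (now >= p->next_switch_time) &&
                (p->sched_index < p->num_schedule_entries) )
        {
            p->sched_index++;
            p->next_switch_time += p->schedule[p->sched_index].runtime;
        }
    }

    /* Schedule exhausted: idle until the next major frame. */
    if ( p->sched_index >= p->num_schedule_entries )
        p->next_switch_time = p->next_major_frame;
}

int main(void)
{
    struct priv p = {
        .schedule = { { 10 }, { 10 }, { 10 }, { 0 } },
        .num_schedule_entries = 3,
        .major_frame = 30,
    };
    s_time_t now;

    for ( now = 0; now <= 40; now += 10 )
    {
        step(&p, now);
        printf("now=%2lld index=%u next_switch=%lld\n",
               now, p.sched_index, p.next_switch_time);
    }

    return 0;
}

After the patch, each cpupool runs this logic against its own
a653sched_priv_t instance rather than against the two function-local statics,
so concurrent pools can no longer corrupt each other's sched_index and
next_switch_time.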