[RFC,07/10] sched: core: remove ASSERT_NOT_IN_ATOMIC and disable preemption[!]

Message ID 20210223023428.757694-8-volodymyr_babchuk@epam.com (mailing list archive)
State New, archived
Series Preemption in hypervisor (ARM only)

Commit Message

Volodymyr Babchuk Feb. 23, 2021, 2:34 a.m. UTC
ASSERT_NOT_IN_ATOMIC() is very strict, because it requires local IRQs
to be enabled. But there is a case where calling the scheduler with
local IRQs disabled is fine: after finishing IRQ handling in hypervisor
mode we may want to preempt the current vCPU, and in that case the
scheduler is entered with local IRQs disabled.
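
For context, here is a minimal standalone sketch of what the removed
assert expresses (an assumption modelled on Xen's per-CPU preemption
counter and local_irq_is_enabled(); names are illustrative, not the
literal definition from xen/include/xen/preempt.h):

    /* sketch_assert.c -- why ASSERT_NOT_IN_ATOMIC() is strict */
    #include <assert.h>
    #include <stdbool.h>

    static unsigned int preempt_cnt;        /* per-CPU counter in real Xen */
    static bool local_irqs_enabled = true;  /* stand-in for local_irq_is_enabled() */

    /* Trips whenever local IRQs are off, even on the legitimate
     * "preempt the vCPU right after hypervisor IRQ handling" path. */
    static void assert_not_in_atomic(void)
    {
        assert(preempt_cnt == 0);
        assert(local_irqs_enabled);
    }

    int main(void)
    {
        local_irqs_enabled = false; /* scheduler entered from the IRQ return path */
        assert_not_in_atomic();     /* fires here although the call is legitimate */
        return 0;
    }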

On the other hand, we want to ensure that the scheduler code itself is
never preempted, so we need to disable preemption while scheduling.
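
The replacement relies on a preemption counter instead. Below is a
sketch of the intended pairing; preempt_enable_no_sched() is presumably
introduced earlier in this series, and its definition here is an
assumption (drop the count without triggering a reschedule):

    /* sketch_preempt.c -- wrap the scheduler in a preemption count */
    #include <assert.h>
    #include <stdio.h>

    static unsigned int preempt_cnt;    /* per-CPU in real Xen */

    static void preempt_disable(void)
    {
        preempt_cnt++;                  /* scheduler must not be preempted */
    }

    /* Drop the count without checking for a pending reschedule,
     * since we are inside the scheduler itself. */
    static void preempt_enable_no_sched(void)
    {
        assert(preempt_cnt > 0);
        preempt_cnt--;
    }

    static void schedule(void)
    {
        preempt_disable();
        /* ... pick the next unit, possibly context switch ... */
        preempt_enable_no_sched();      /* every exit path needs a matching enable */
    }

    int main(void)
    {
        schedule();
        printf("preempt_cnt=%u\n", preempt_cnt);    /* back to 0 */
        return 0;
    }

The hunks below follow this pairing: preempt_disable() at the top of
schedule() and a matching preempt_enable_no_sched() on every exit path,
including the one that only runs once a new vCPU continues in
continue_new_vcpu().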

WARNING! This patch works only for ARM, because the ARM code returns
after the call to sched_context_switch() and is therefore able to
re-enable preemption. The x86 case requires further investigation.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/arch/arm/domain.c   |  3 +++
 xen/common/sched/core.c | 13 ++++++++++---
 2 files changed, 13 insertions(+), 3 deletions(-)

Patch

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index bdd3d3e5b5..2ccf4449ea 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -335,6 +335,9 @@  static void continue_new_vcpu(struct vcpu *prev)
 
     schedule_tail(prev);
 
+    /* This matches preempt_disable() in schedule() */
+    preempt_enable_no_sched();
+
     if ( is_idle_vcpu(current) )
         reset_stack_and_jump(idle_loop);
     else if ( is_32bit_domain(current->domain) )
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 7e075613d5..057b558367 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -2577,8 +2577,6 @@  static void sched_slave(void)
     unsigned int          cpu = smp_processor_id();
     unsigned long         flags;
 
-    ASSERT_NOT_IN_ATOMIC();
-
     rcu_read_lock(&sched_res_rculock);
 
     lock = pcpu_schedule_lock_irqsave(cpu, &flags);
@@ -2643,7 +2641,7 @@  static void schedule(void)
     int cpu = smp_processor_id();
     unsigned int          gran;
 
-    ASSERT_NOT_IN_ATOMIC();
+    preempt_disable();
 
     SCHED_STAT_CRANK(sched_run);
 
@@ -2665,6 +2663,9 @@  static void schedule(void)
         rcu_read_unlock(&sched_res_rculock);
 
         raise_softirq(SCHEDULE_SOFTIRQ);
+
+        preempt_enable_no_sched();
+
         return sched_slave();
     }
 
@@ -2681,7 +2682,10 @@  static void schedule(void)
         cpumask_raise_softirq(mask, SCHED_SLAVE_SOFTIRQ);
         next = sched_wait_rendezvous_in(prev, &lock, &flags, cpu, now);
         if ( !next )
+        {
+            preempt_enable_no_sched();
             return;
+        }
     }
     else
     {
@@ -2695,6 +2699,9 @@  static void schedule(void)
     vnext = sched_unit2vcpu_cpu(next, cpu);
     sched_context_switch(vprev, vnext,
                          !is_idle_unit(prev) && is_idle_unit(next), now);
+
+    /* XXX: Move me */
+    preempt_enable_no_sched();
 }
 
 /* The scheduler timer: force a run through the scheduler */