diff --git a/xen/common/preempt.c b/xen/common/preempt.c
--- a/xen/common/preempt.c
+++ b/xen/common/preempt.c
@@ -4,6 +4,7 @@
* Track atomic regions in the hypervisor which disallow sleeping.
*
* Copyright (c) 2010, Keir Fraser <keir@xen.org>
+ * Copyright (c) 2021, EPAM Systems
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,13 +22,43 @@
#include <xen/preempt.h>
#include <xen/irq.h>
+#include <xen/sched.h>
+#include <xen/wait.h>
#include <asm/system.h>
DEFINE_PER_CPU(atomic_t, __preempt_count);
+DEFINE_PER_CPU(unsigned int, need_reschedule);
bool_t in_atomic(void)
{
-    return atomic_read(&preempt_count()) || in_irq() || !local_irq_is_enabled();
+    return atomic_read(&preempt_count()) || in_irq();
+}
+
+void try_preempt(bool force)
+{
+    /*
+     * If the caller wants us to call the scheduler but we are in an
+     * atomic context, just set the flag. We will try preemption upon
+     * exit from the atomic context.
+     */
+    if ( force && in_atomic() )
+    {
+        this_cpu(need_reschedule) = 1;
+        return;
+    }
+
+    /* The idle vCPU schedules via soft IRQs. */
+    if ( unlikely(system_state != SYS_STATE_active) ||
+         in_atomic() ||
+         is_idle_vcpu(current) )
+        return;
+
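+    /* Reschedule now, or act on a request deferred while atomic. */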
+    if ( force || this_cpu(need_reschedule) )
+    {
+        this_cpu(need_reschedule) = 0;
+        wait();
+    }
}
#ifndef NDEBUG
diff --git a/xen/include/xen/preempt.h b/xen/include/xen/preempt.h
--- a/xen/include/xen/preempt.h
+++ b/xen/include/xen/preempt.h
@@ -4,6 +4,7 @@
* Track atomic regions in the hypervisor which disallow sleeping.
*
* Copyright (c) 2010, Keir Fraser <keir@xen.org>
+ * Copyright (c) 2021, EPAM Systems
*/
#ifndef __XEN_PREEMPT_H__
@@ -15,6 +16,8 @@
DECLARE_PER_CPU(atomic_t, __preempt_count);
+void try_preempt(bool force);
+
#define preempt_count() (this_cpu(__preempt_count))
#define preempt_disable() do { \
@@ -23,6 +26,11 @@ DECLARE_PER_CPU(atomic_t, __preempt_count);
#define preempt_enable() do { \
    atomic_dec(&preempt_count()); \
+    try_preempt(false); \
+} while (0)
+
+#define preempt_enable_no_sched() do { \
+    atomic_dec(&preempt_count()); \
} while (0)
bool_t in_atomic(void);
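
For illustration, and not part of the patch: because preempt_enable()
now ends with try_preempt(false), while try_preempt() returns early as
long as in_atomic() is true, nested preempt_disable()/preempt_enable()
pairs reschedule only at the outermost preempt_enable(). Below is a
minimal standalone C model of that behaviour; schedule_stub() is a
hypothetical stand-in for the real scheduler entry (wait() in the
patch), and the per-CPU variables are modelled as plain globals:

    #include <stdbool.h>
    #include <stdio.h>

    static int preempt_count;       /* models this_cpu(__preempt_count) */
    static bool need_reschedule;    /* models this_cpu(need_reschedule) */

    static bool in_atomic(void) { return preempt_count > 0; }

    static void schedule_stub(void) /* stand-in for wait() */
    {
        printf("rescheduled\n");
    }

    static void try_preempt(bool force)
    {
        if ( force && in_atomic() )
        {
            need_reschedule = true; /* defer until we leave atomic context */
            return;
        }
        if ( in_atomic() )
            return;
        if ( force || need_reschedule )
        {
            need_reschedule = false;
            schedule_stub();
        }
    }

    #define preempt_disable() (preempt_count++)
    #define preempt_enable()  do { preempt_count--; try_preempt(false); } while (0)

    int main(void)
    {
        preempt_disable();  /* outer atomic section */
        preempt_disable();  /* nested atomic section */
        try_preempt(true);  /* e.g. an IRQ wants a reschedule: deferred */
        preempt_enable();   /* count 2 -> 1: still atomic, nothing happens */
        preempt_enable();   /* count 1 -> 0: deferred reschedule fires here */
        return 0;
    }

Running it prints a single "rescheduled" line, emitted by the second,
outermost preempt_enable().
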
The try_preempt() function can be used to preempt code running in
hypervisor mode. Generally, there are two reasons to preempt while in
HYP mode:

1. An IRQ arrived. It may have woken a vCPU with a higher scheduling
   priority.

2. Exit from an atomic context. While we were in the atomic context,
   the state of the system may have changed and we need to reschedule.

It is very inefficient to call the scheduler every time we leave an
atomic context, so a very simple optimization is used. There are cases
when we *know* that there might be a reason for preemption; one example
is an IRQ. In such a case we call try_preempt(true), which forces
rescheduling if we are outside an atomic context, or otherwise ensures
that the scheduler will be called right after we leave the atomic
context. The latter is done by calling try_preempt(false) whenever we
leave an atomic context: try_preempt(false) checks whether
try_preempt(true) was called while in the atomic context, and calls the
scheduler only in that case.

Also, the macro preempt_enable_no_sched() is introduced. It is meant to
be used by the scheduler itself, because we do not want to initiate
rescheduling inside scheduler code.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/common/preempt.c      | 33 ++++++++++++++++++++++++++++++++-
 xen/include/xen/preempt.h |  8 ++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)
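
For illustration, and not part of the patch: a sketch of why
preempt_enable_no_sched() is needed, assuming the scheduler runs its
own code with preemption disabled. If that section ended with plain
preempt_enable(), the embedded try_preempt(false) could find
need_reschedule set again (e.g. by an IRQ that arrived meanwhile) and
re-enter the scheduler. The model below is standalone C, not Xen code;
do_schedule() is a hypothetical stand-in for the scheduler:

    #include <stdbool.h>
    #include <stdio.h>

    static int preempt_count;
    static bool need_reschedule;

    static bool in_atomic(void) { return preempt_count > 0; }

    static void do_schedule(void);

    static void try_preempt(bool force)
    {
        if ( force && in_atomic() )
        {
            need_reschedule = true;
            return;
        }
        if ( in_atomic() )
            return;
        if ( force || need_reschedule )
        {
            need_reschedule = false;
            do_schedule();
        }
    }

    #define preempt_disable()         (preempt_count++)
    #define preempt_enable()          do { preempt_count--; try_preempt(false); } while (0)
    #define preempt_enable_no_sched() (preempt_count--)

    static void do_schedule(void)
    {
        preempt_disable();      /* the scheduler's own atomic section */
        need_reschedule = true; /* pretend an IRQ fired meanwhile */
        printf("context switch\n");
        /* preempt_enable() here would call do_schedule() recursively */
        preempt_enable_no_sched();
    }

    int main(void)
    {
        try_preempt(true);      /* reschedule from a safe context */
        return 0;
    }

As written, the program prints "context switch" once; ending
do_schedule() with preempt_enable() instead would let it re-enter
itself via try_preempt().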