@@ -1769,6 +1769,7 @@ static void __context_switch(void)
if ( !is_idle_domain(pd) )
{
+ ASSERT(read_atomic(&p->dirty_cpu) == cpu);
memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
vcpu_save_fpu(p);
pd->arch.ctxt_switch->from(p);
@@ -1832,7 +1833,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
{
unsigned int cpu = smp_processor_id();
const struct domain *prevd = prev->domain, *nextd = next->domain;
- unsigned int dirty_cpu = next->dirty_cpu;
+ unsigned int dirty_cpu = read_atomic(&next->dirty_cpu);
ASSERT(prev != next);
ASSERT(local_irq_is_enabled());
@@ -1844,6 +1845,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
{
/* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
flush_mask(cpumask_of(dirty_cpu), FLUSH_VCPU_STATE);
+ ASSERT(read_atomic(&next->dirty_cpu) == VCPU_CPU_CLEAN);
}
_update_runstate_area(prev);
@@ -1956,13 +1958,17 @@ void sync_local_execstate(void)
void sync_vcpu_execstate(struct vcpu *v)
{
- if ( v->dirty_cpu == smp_processor_id() )
+ unsigned int dirty_cpu = read_atomic(&v->dirty_cpu);
+
+ if ( dirty_cpu == smp_processor_id() )
sync_local_execstate();
- else if ( vcpu_cpu_dirty(v) )
+ else if ( is_vcpu_dirty_cpu(dirty_cpu) )
{
/* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
- flush_mask(cpumask_of(v->dirty_cpu), FLUSH_VCPU_STATE);
+ flush_mask(cpumask_of(dirty_cpu), FLUSH_VCPU_STATE);
}
+ ASSERT(read_atomic(&v->dirty_cpu) != dirty_cpu ||
+ dirty_cpu == VCPU_CPU_CLEAN);
}
static int relinquish_memory(
@@ -844,7 +844,7 @@ static inline bool is_vcpu_dirty_cpu(unsigned int cpu)
static inline bool vcpu_cpu_dirty(const struct vcpu *v)
{
- return is_vcpu_dirty_cpu(v->dirty_cpu);
+ return is_vcpu_dirty_cpu(read_atomic(&v->dirty_cpu));
}
void vcpu_block(void);
The dirty_cpu field of struct vcpu denotes which cpu still holds data of that vcpu. All accesses to this field should be atomic, as the vcpu could be running concurrently and the field is accessed without any lock held in most cases. There are some instances where accesses are not done atomically, and — even worse — where multiple accesses are performed when a single one is mandated. Correct that in order to avoid potential problems. Add some assertions to verify dirty_cpu is handled properly. Signed-off-by: Juergen Gross <jgross@suse.com> --- xen/arch/x86/domain.c | 14 ++++++++++---- xen/include/xen/sched.h | 2 +- 2 files changed, 11 insertions(+), 5 deletions(-)