@@ -73,6 +73,8 @@
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);

+static DEFINE_PER_CPU(bool, full_gdt_loaded);
+
 static void default_idle(void);
 void (*pm_idle) (void) __read_mostly = default_idle;
 void (*dead_idle) (void) __read_mostly = default_dead_idle;
@@ -1614,6 +1616,41 @@ static inline bool need_full_gdt(const struct domain *d)
     return is_pv_domain(d) && !is_idle_domain(d);
 }

+static inline void write_full_gdt_ptes(seg_desc_t *gdt, struct vcpu *v)
+{
+    unsigned long mfn = virt_to_mfn(gdt);
+    l1_pgentry_t *pl1e = pv_gdt_ptes(v);
+    unsigned int i;
+
+    for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
+        l1e_write(pl1e + FIRST_RESERVED_GDT_PAGE + i,
+                  l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR_RW));
+}
+
+static inline void load_full_gdt(struct vcpu *v, unsigned int cpu)
+{
+    struct desc_ptr gdt_desc;
+
+    gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
+    gdt_desc.base = GDT_VIRT_START(v);
+
+    lgdt(&gdt_desc);
+
+    per_cpu(full_gdt_loaded, cpu) = true;
+}
+
+static inline void load_default_gdt(seg_desc_t *gdt, unsigned int cpu)
+{
+    struct desc_ptr gdt_desc;
+
+    gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
+    gdt_desc.base = (unsigned long)(gdt - FIRST_RESERVED_GDT_ENTRY);
+
+    lgdt(&gdt_desc);
+
+    per_cpu(full_gdt_loaded, cpu) = false;
+}
+
 static void __context_switch(void)
 {
     struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
@@ -1622,7 +1659,7 @@ static void __context_switch(void)
     struct vcpu          *n = current;
     struct domain        *pd = p->domain, *nd = n->domain;
     seg_desc_t           *gdt;
-    struct desc_ptr       gdt_desc;
+    bool                  need_full_gdt_n;

     ASSERT(p != n);
     ASSERT(!vcpu_cpu_dirty(n));
@@ -1664,25 +1701,15 @@ static void __context_switch(void)
     gdt = !is_pv_32bit_domain(nd) ? per_cpu(gdt_table, cpu) :
                                     per_cpu(compat_gdt_table, cpu);

-    if ( need_full_gdt(nd) )
-    {
-        unsigned long mfn = virt_to_mfn(gdt);
-        l1_pgentry_t *pl1e = pv_gdt_ptes(n);
-        unsigned int i;
-
-        for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
-            l1e_write(pl1e + FIRST_RESERVED_GDT_PAGE + i,
-                      l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR_RW));
-    }
+    need_full_gdt_n = need_full_gdt(nd);

-    if ( need_full_gdt(pd) &&
-         ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(nd)) )
-    {
-        gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
-        gdt_desc.base = (unsigned long)(gdt - FIRST_RESERVED_GDT_ENTRY);
+    if ( need_full_gdt_n )
+        write_full_gdt_ptes(gdt, n);

-        lgdt(&gdt_desc);
-    }
+    if ( per_cpu(full_gdt_loaded, cpu) &&
+         ((p->vcpu_id != n->vcpu_id) || !need_full_gdt_n) )
+        load_default_gdt(gdt, cpu);

     write_ptbase(n);
@@ -1693,14 +1720,8 @@ static void __context_switch(void)
         svm_load_segs(0, 0, 0, 0, 0, 0, 0);
 #endif

-    if ( need_full_gdt(nd) &&
-         ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(pd)) )
-    {
-        gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
-        gdt_desc.base = GDT_VIRT_START(n);
-
-        lgdt(&gdt_desc);
-    }
+    if ( need_full_gdt_n && !per_cpu(full_gdt_loaded, cpu) )
+        load_full_gdt(n, cpu);

     if ( pd != nd )
         cpumask_clear_cpu(cpu, pd->dirty_cpumask);
In preparation for core scheduling, carve out the GDT related
functionality (writing GDT related PTEs, loading the default or full
GDT) into sub-functions.

Instead of dynamically deciding whether the previous vcpu was using the
full or the default GDT, just add a percpu variable for that purpose.
This at once removes the need to test twice whether the vcpu_ids
differ.

Cache the need_full_gdt(nd) value in a local variable.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/domain.c | 71 +++++++++++++++++++++++++++++++++------------------
 1 file changed, 46 insertions(+), 25 deletions(-)
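For reviewers who want to poke at the resulting state machine in
isolation, here is a minimal, stand-alone C model of the new logic.
It is not part of the patch; all types and helpers below are stand-ins
for the Xen ones (struct vcpu, per_cpu(), lgdt, write_ptbase), only the
control flow mirrors __context_switch() after this change:

/*
 * Stand-alone model of the per-cpu full_gdt_loaded bookkeeping.
 * Compile with: cc -std=c99 -Wall model.c
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 2

struct vcpu { int vcpu_id; bool needs_full_gdt; };

/* Models the new percpu full_gdt_loaded variable. */
static bool full_gdt_loaded[NR_CPUS];

static void load_full_gdt(const struct vcpu *v, unsigned int cpu)
{
    printf("cpu%u: lgdt <- full GDT of vcpu %d\n", cpu, v->vcpu_id);
    full_gdt_loaded[cpu] = true;
}

static void load_default_gdt(unsigned int cpu)
{
    printf("cpu%u: lgdt <- default GDT\n", cpu);
    full_gdt_loaded[cpu] = false;
}

/* Mirrors the GDT handling in __context_switch() after this patch. */
static void context_switch(const struct vcpu *p, const struct vcpu *n,
                           unsigned int cpu)
{
    bool need_full_gdt_n = n->needs_full_gdt;

    /*
     * Drop a stale full GDT before switching page tables: whether one
     * is loaded is read directly from the flag instead of being
     * re-derived from the previous vcpu's domain.
     */
    if ( full_gdt_loaded[cpu] &&
         ((p->vcpu_id != n->vcpu_id) || !need_full_gdt_n) )
        load_default_gdt(cpu);

    /* write_ptbase(n) would switch address spaces here. */

    if ( need_full_gdt_n && !full_gdt_loaded[cpu] )
        load_full_gdt(n, cpu);
}

int main(void)
{
    struct vcpu pv   = { .vcpu_id = 1, .needs_full_gdt = true };
    struct vcpu idle = { .vcpu_id = 0, .needs_full_gdt = false };

    context_switch(&idle, &pv, 0);  /* loads pv's full GDT */
    context_switch(&pv, &idle, 0);  /* reverts to the default GDT */
    return 0;
}

The point of the percpu flag shows up in context_switch(): the
"unload stale GDT" decision only tests the vcpu_ids once, and the flag
is updated in exactly one place each in load_full_gdt() and
load_default_gdt().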