@@ -69,6 +69,7 @@
#include <asm/pv/domain.h>
#include <asm/pv/mm.h>
#include <asm/spec_ctrl.h>
+#include <asm/setup.h>
DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
@@ -1580,12 +1581,20 @@ void paravirt_ctxt_switch_from(struct vcpu *v)
void paravirt_ctxt_switch_to(struct vcpu *v)
{
- root_pgentry_t *root_pgt = this_cpu(root_pgt);
+ mfn_t rpt_mfn = this_cpu(root_pgt_mfn);
- if ( root_pgt )
- root_pgt[root_table_offset(PERDOMAIN_VIRT_START)] =
+ if ( !mfn_eq(rpt_mfn, INVALID_MFN) )
+ {
+ root_pgentry_t *rpt;
+
+ mapcache_override_current(INVALID_VCPU);
+ rpt = map_xen_pagetable_new(rpt_mfn);
+ rpt[root_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_page(v->domain->arch.perdomain_l3_pg,
__PAGE_HYPERVISOR_RW);
+ UNMAP_XEN_PAGETABLE_NEW(rpt);
+ mapcache_override_current(NULL);
+ }
if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
activate_debugregs(v);
@@ -57,7 +57,7 @@ static inline struct vcpu *mapcache_current_vcpu(void)
return v;
}
-void __init mapcache_override_current(struct vcpu *v)
+void mapcache_override_current(struct vcpu *v)
{
this_cpu(override) = v;
}
@@ -530,7 +530,7 @@ void write_ptbase(struct vcpu *v)
if ( is_pv_vcpu(v) && v->domain->arch.pv.xpti )
{
cpu_info->root_pgt_changed = true;
- cpu_info->pv_cr3 = __pa(this_cpu(root_pgt));
+ cpu_info->pv_cr3 = mfn_to_maddr(this_cpu(root_pgt_mfn));
if ( new_cr4 & X86_CR4_PCIDE )
cpu_info->pv_cr3 |= get_pcid_bits(v, true);
switch_cr3_cr4(v->arch.cr3, new_cr4);
@@ -360,7 +360,7 @@ static void _toggle_guest_pt(struct vcpu *v)
if ( d->arch.pv.xpti )
{
cpu_info->root_pgt_changed = true;
- cpu_info->pv_cr3 = __pa(this_cpu(root_pgt)) |
+ cpu_info->pv_cr3 = mfn_to_maddr(this_cpu(root_pgt_mfn)) |
(d->arch.pv.pcid ? get_pcid_bits(v, true) : 0);
}
@@ -813,7 +813,7 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt)
return rc;
}
-DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
+DEFINE_PER_CPU(mfn_t, root_pgt_mfn);
static root_pgentry_t common_pgt;
@@ -821,19 +821,32 @@ extern const char _stextentry[], _etextentry[];
static int setup_cpu_root_pgt(unsigned int cpu)
{
-    root_pgentry_t *rpt;
+    root_pgentry_t *rpt = NULL;
+    mfn_t rpt_mfn;
unsigned int off;
int rc;
+    /*
+     * Per-CPU data is zero-initialised and MFN 0 is a valid MFN, so the
+     * "no root page table" state must be marked explicitly, covering both
+     * the XPTI-off and the allocation-failure paths below.
+     */
+    per_cpu(root_pgt_mfn, cpu) = INVALID_MFN;
+
if ( !opt_xpti_hwdom && !opt_xpti_domu )
-        return 0;
+    {
+        rc = 0;
+        goto out;
+    }
- rpt = alloc_xen_pagetable();
- if ( !rpt )
- return -ENOMEM;
+ rpt_mfn = alloc_xen_pagetable_new();
+ if ( mfn_eq(rpt_mfn, INVALID_MFN) )
+ {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rpt = map_xen_pagetable_new(rpt_mfn);
clear_page(rpt);
- per_cpu(root_pgt, cpu) = rpt;
+ per_cpu(root_pgt_mfn, cpu) = rpt_mfn;
rpt[root_table_offset(RO_MPT_VIRT_START)] =
idle_pg_table[root_table_offset(RO_MPT_VIRT_START)];
@@ -850,7 +858,7 @@ static int setup_cpu_root_pgt(unsigned int cpu)
rc = clone_mapping(ptr, rpt);
if ( rc )
- return rc;
+ goto out;
common_pgt = rpt[root_table_offset(XEN_VIRT_START)];
}
@@ -875,19 +883,24 @@ static int setup_cpu_root_pgt(unsigned int cpu)
if ( !rc )
rc = clone_mapping((void *)per_cpu(stubs.addr, cpu), rpt);
+ out:
+ UNMAP_XEN_PAGETABLE_NEW(rpt);
return rc;
}
static void cleanup_cpu_root_pgt(unsigned int cpu)
{
- root_pgentry_t *rpt = per_cpu(root_pgt, cpu);
+ mfn_t rpt_mfn = per_cpu(root_pgt_mfn, cpu);
+ root_pgentry_t *rpt;
unsigned int r;
unsigned long stub_linear = per_cpu(stubs.addr, cpu);
- if ( !rpt )
+ if ( mfn_eq(rpt_mfn, INVALID_MFN) )
return;
- per_cpu(root_pgt, cpu) = NULL;
+ per_cpu(root_pgt_mfn, cpu) = INVALID_MFN;
+
+ rpt = map_xen_pagetable_new(rpt_mfn);
for ( r = root_table_offset(DIRECTMAP_VIRT_START);
r < root_table_offset(HYPERVISOR_VIRT_END); ++r )
@@ -932,7 +945,8 @@ static void cleanup_cpu_root_pgt(unsigned int cpu)
free_xen_pagetable_new(l3t_mfn);
}
- free_xen_pagetable(rpt);
+ UNMAP_XEN_PAGETABLE_NEW(rpt);
+ free_xen_pagetable_new(rpt_mfn);
/* Also zap the stub mapping for this CPU. */
if ( stub_linear )
@@ -1136,7 +1150,7 @@ void __init smp_prepare_cpus(void)
rc = setup_cpu_root_pgt(0);
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
- if ( per_cpu(root_pgt, 0) )
+ if ( !mfn_eq(per_cpu(root_pgt_mfn, 0), INVALID_MFN) )
{
get_cpu_info()->pv_cr3 = 0;
@@ -646,4 +646,6 @@ void free_xen_pagetable_new(mfn_t mfn);
l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
+DECLARE_PER_CPU(mfn_t, root_pgt_mfn);
+
#endif /* __ASM_X86_MM_H__ */
@@ -465,7 +465,6 @@ static inline void disable_each_ist(idt_entry_t *idt)
extern idt_entry_t idt_table[];
extern idt_entry_t *idt_tables[];
-DECLARE_PER_CPU(root_pgentry_t *, root_pgt);
extern void write_ptbase(struct vcpu *v);