@@ -1075,7 +1075,48 @@ int arch_set_info_guest(
             goto out;
 
         if ( v->vcpu_id == 0 )
+        {
+            /*
+             * In the restore case we need to deal with L4 pages which got
+             * initialized with m2p_strict still clear (and which hence lack the
+             * correct initial RO_MPT_VIRT_{START,END} L4 entry).
+             */
+            if ( d != current->domain && !VM_ASSIST(d, m2p_strict) &&
+                 is_pv_domain(d) && !is_pv_32bit_domain(d) &&
+                 test_bit(VMASST_TYPE_m2p_strict, &c.nat->vm_assist) &&
+                 atomic_read(&d->arch.pv_domain.nr_l4_pages) )
+            {
+                bool_t done = 0;
+
+                spin_lock_recursive(&d->page_alloc_lock);
+
+                for ( i = 0; ; )
+                {
+                    struct page_info *page = page_list_remove_head(&d->page_list);
+
+                    if ( page_lock(page) )
+                    {
+                        if ( (page->u.inuse.type_info & PGT_type_mask) ==
+                             PGT_l4_page_table )
+                            done = !fill_ro_mpt(page_to_mfn(page));
+
+                        page_unlock(page);
+                    }
+
+                    page_list_add_tail(page, &d->page_list);
+
+                    if ( done || (!(++i & 0xff) && hypercall_preempt_check()) )
+                        break;
+                }
+
+                spin_unlock_recursive(&d->page_alloc_lock);
+
+                if ( !done )
+                    return -ERESTART;
+            }
+
+            d->vm_assist = c(vm_assist);
+        }
 
         rc = put_old_guest_table(current);
         if ( rc )
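
The hunk above (arch_set_info_guest(), in xen/arch/x86/domain.c) cannot keep an iterator into d->page_list across preemption, so it rotates the list instead: each page is pulled off the head and re-appended at the tail, and an interrupted pass simply resumes from whatever the head is by then. Termination relies on fill_ro_mpt()'s new return value: the first L4 page found with its RO-MPT slot already populated must be one the scan fixed up earlier, meaning the rotation has come full circle. Below is a minimal userspace sketch of that rotate-and-sentinel pattern; node, fixup_pass and budget are illustrative stand-ins, not Xen types or APIs.

    /* rotate_scan.c: standalone model of the loop above, not Xen code. */
    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        struct node *next;
        bool is_l4;        /* stands in for the PGT_l4_page_table check */
        bool slot_filled;  /* stands in for the RO_MPT_VIRT_START slot */
    };

    /* Callers never dequeue from an empty queue in this model. */
    static struct node *dequeue(struct node **head, struct node **tail)
    {
        struct node *n = *head;

        *head = n->next;
        if ( !*head )
            *tail = NULL;
        n->next = NULL;
        return n;
    }

    static void enqueue(struct node **head, struct node **tail,
                        struct node *n)
    {
        if ( *tail )
            (*tail)->next = n;
        else
            *head = n;
        *tail = n;
    }

    /* One preemptible pass: returns true once a wrapped-around (already
     * filled) L4 node is seen; false models the -ERESTART continuation. */
    static bool fixup_pass(struct node **head, struct node **tail,
                           unsigned int budget)
    {
        bool done = false;
        unsigned int i;

        for ( i = 0; ; )
        {
            struct node *n = dequeue(head, tail);

            if ( n->is_l4 )
            {
                done = n->slot_filled;  /* done = !fill_ro_mpt(...) */
                n->slot_filled = true;
            }

            enqueue(head, tail, n);    /* re-add before deciding, as above */

            if ( done || ++i == budget ) /* budget ~ hypercall_preempt_check() */
                break;
        }

        return done;
    }

    int main(void)
    {
        struct node n[4] = {
            { &n[1], true,  false },
            { &n[2], false, false },
            { &n[3], true,  false },
            { NULL,  true,  false },
        };
        struct node *head = &n[0], *tail = &n[3];
        unsigned int restarts = 0;

        while ( !fixup_pass(&head, &tail, 2) ) /* tiny budget forces retries */
            restarts++;

        printf("scan completed after %u restart(s)\n", restarts);
        return 0;
    }

If the budget expires with done still clear, the real code drops the lock and returns -ERESTART so the hypercall continuation can run the pass again; since fixed-up pages accumulate at the tail, successive passes keep making forward progress.
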
@@ -1463,13 +1463,20 @@ void init_guest_l4_table(l4_pgentry_t l4
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 }
 
-void fill_ro_mpt(unsigned long mfn)
+bool_t fill_ro_mpt(unsigned long mfn)
 {
     l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+    bool_t ret = 0;
 
-    l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
-        idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
+    if ( !l4e_get_intpte(l4tab[l4_table_offset(RO_MPT_VIRT_START)]) )
+    {
+        l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
+            idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
+        ret = 1;
+    }
     unmap_domain_page(l4tab);
+
+    return ret;
 }
 
 void zap_ro_mpt(unsigned long mfn)
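
fill_ro_mpt() (xen/arch/x86/mm.c) now writes the RO_MPT_VIRT_START slot only when it is still empty, and reports whether it actually wrote; a zero return is precisely the wrap-around sentinel the restore loop keys on via done = !fill_ro_mpt(...). The contract in miniature, with entry_t and fill_if_empty as stand-ins for the real L4 entry handling:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t entry_t;  /* stand-in for an L4 entry's raw value */

    /* Fill *slot from tmpl iff it is still empty; report whether we wrote. */
    static int fill_if_empty(entry_t *slot, entry_t tmpl)
    {
        if ( *slot )
            return 0;  /* already populated: nothing done */
        *slot = tmpl;
        return 1;      /* empty -> filled transition */
    }

    int main(void)
    {
        entry_t slot = 0;

        assert(fill_if_empty(&slot, 0xabc));   /* first call does the work */
        assert(!fill_if_empty(&slot, 0xabc));  /* second call is a no-op */
        return 0;
    }
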
@@ -1527,10 +1534,15 @@ static int alloc_l4_table(struct page_in
         adjust_guest_l4e(pl4e[i], d);
     }
 
-    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+    if ( rc >= 0 )
+    {
+        init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+        atomic_inc(&d->arch.pv_domain.nr_l4_pages);
+        rc = 0;
+    }
     unmap_domain_page(pl4e);
 
-    return rc > 0 ? 0 : rc;
+    return rc;
 }
 
 static void free_l1_table(struct page_info *page)
@@ -1648,7 +1660,13 @@ static int free_l4_table(struct page_inf
 
     unmap_domain_page(pl4e);
 
-    return rc > 0 ? 0 : rc;
+    if ( rc >= 0 )
+    {
+        atomic_dec(&d->arch.pv_domain.nr_l4_pages);
+        rc = 0;
+    }
+
+    return rc;
 }
 
 int page_lock(struct page_info *page)
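
alloc_l4_table() and free_l4_table() used to fold any positive rc into 0 at the return statement; they now normalise on an explicit rc >= 0 success path and adjust nr_l4_pages there and only there, so the counter tracks exactly the number of fully validated L4 tables the domain owns. A reduced model of that paired accounting, with C11 atomics standing in for Xen's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int nr_l4_pages;

    /* Models alloc_l4_table(): rc >= 0 means the table validated OK. */
    static int alloc_l4(int rc)
    {
        if ( rc >= 0 )
        {
            atomic_fetch_add(&nr_l4_pages, 1); /* counted once, on success */
            rc = 0;                            /* callers see only 0/-errno */
        }
        return rc;
    }

    /* Models free_l4_table(): the decrement mirrors the increment. */
    static int free_l4(int rc)
    {
        if ( rc >= 0 )
        {
            atomic_fetch_sub(&nr_l4_pages, 1);
            rc = 0;
        }
        return rc;
    }

    int main(void)
    {
        alloc_l4(0);   /* success: counter becomes 1 */
        alloc_l4(-1);  /* failure: counter unchanged */
        free_l4(0);    /* success: counter back to 0 */
        printf("nr_l4_pages = %d\n", atomic_load(&nr_l4_pages));
        return 0;
    }
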
@@ -248,6 +248,8 @@ struct pv_domain
 {
     l1_pgentry_t **gdt_ldt_l1tab;
 
+    atomic_t nr_l4_pages;
+
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
 };
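
The counter lives in struct pv_domain (xen/include/asm-x86/domain.h) as an atomic_t so the restore path can consult it without taking page_alloc_lock first: a zero count proves there is no L4 table to fix up and the whole locked scan is skipped. The guard pattern in miniature, with struct dom and needs_l4_fixup as hypothetical stand-ins:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct dom {
        atomic_int nr_l4_pages;  /* kept exact by the alloc/free paths */
    };

    /* Lock-free fast path: only take the expensive locked walk of the
     * page list when the domain owns at least one L4 page table. */
    static bool needs_l4_fixup(struct dom *d)
    {
        return atomic_load(&d->nr_l4_pages) > 0;
    }

    int main(void)
    {
        static struct dom d;  /* zero-initialised: no L4 pages yet */

        return needs_l4_fixup(&d) ? 1 : 0;  /* exits 0: scan skipped */
    }
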
@@ -322,7 +322,7 @@ int free_page_type(struct page_info *pag
 
 void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
                          bool_t zap_ro_mpt);
-void fill_ro_mpt(unsigned long mfn);
+bool_t fill_ro_mpt(unsigned long mfn);
 void zap_ro_mpt(unsigned long mfn);
 
 int is_iomem_page(unsigned long mfn);