@@ -1698,7 +1698,7 @@ void p2m_final_teardown(struct domain *d)
int p2m_init(struct domain *d)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- int rc = 0;
+ int rc;
unsigned int cpu;
rwlock_init(&p2m->lock);
@@ -1707,11 +1707,6 @@ int p2m_init(struct domain *d)
INIT_PAGE_LIST_HEAD(&d->arch.paging.p2m_freelist);
p2m->vmid = INVALID_VMID;
-
- rc = p2m_alloc_vmid(d);
- if ( rc != 0 )
- return rc;
-
p2m->max_mapped_gfn = _gfn(0);
p2m->lowest_mapped_gfn = _gfn(ULONG_MAX);
@@ -1727,8 +1722,6 @@ int p2m_init(struct domain *d)
p2m->clean_pte = is_iommu_enabled(d) &&
!iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
- rc = p2m_alloc_table(d);
-
/*
* Make sure that the type chosen to is able to store the an vCPU ID
* between 0 and the maximum of virtual CPUS supported as long as
@@ -1741,13 +1734,20 @@ int p2m_init(struct domain *d)
p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID;
/*
- * Besides getting a domain when we only have the p2m in hand,
- * the back pointer to domain is also used in p2m_teardown()
- * as an end-of-initialization indicator.
+ * "Trivial" initialisation is now complete.  Set the backpointer so
+ * p2m_teardown() knows there is state needing to be torn down.
*/
p2m->domain = d;
- return rc;
+ rc = p2m_alloc_vmid(d);
+ if ( rc )
+ return rc;
+
+ rc = p2m_alloc_table(d);
+ if ( rc )
+ return rc;
+
+ return 0;
}
/*