@@ -1018,7 +1018,7 @@ int domain_relinquish_resources(struct domain *d)
/* Fallthrough */
case RELMEM_p2m:
- ret = p2m_teardown(d);
+ ret = p2m_teardown(d, true);
if ( ret )
return ret;
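
For context, domain_relinquish_resources() is a restartable state machine: each
RELMEM_* stage may return -ERESTART, after which the toplevel code arranges a
hypercall continuation and later re-enters the function at the recorded state.
A minimal standalone model of that pattern (illustration only; the enum, struct
and helper names below are stand-ins, not Xen code):

#ifndef ERESTART
#define ERESTART 85   /* "restart this hypercall", as in the Xen/Linux errno */
#endif

enum relmem { RELMEM_mapping, RELMEM_p2m, RELMEM_done };
struct dom { enum relmem rel_state; };

/* Stand-ins for the real teardown helpers; either may return -ERESTART. */
static int relinquish_mappings(struct dom *d) { (void)d; return 0; }
static int teardown_p2m(struct dom *d)        { (void)d; return 0; }

static int relinquish_resources(struct dom *d)
{
    int ret;

    switch ( d->rel_state )
    {
    case RELMEM_mapping:
        ret = relinquish_mappings(d);
        if ( ret )
            return ret;              /* -ERESTART: re-enter here later */
        d->rel_state = RELMEM_p2m;
        /* Fallthrough */
    case RELMEM_p2m:
        ret = teardown_p2m(d);       /* models the p2m_teardown(d, true) step */
        if ( ret )
            return ret;
        d->rel_state = RELMEM_done;
        /* Fallthrough */
    case RELMEM_done:
        break;
    }

    return 0;
}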
@@ -1629,7 +1629,7 @@ static void p2m_free_vmid(struct domain *d)
spin_unlock(&vmid_alloc_lock);
}
-int p2m_teardown(struct domain *d)
+int p2m_teardown(struct domain *d, bool allow_preemption)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
unsigned long count = 0;
@@ -1637,6 +1637,9 @@ int p2m_teardown(struct domain *d)
unsigned int i;
int rc = 0;
+ if ( page_list_empty(&p2m->pages) )
+ return 0;
+
p2m_write_lock(p2m);
/*
@@ -1660,7 +1663,7 @@ int p2m_teardown(struct domain *d)
p2m_free_page(p2m->domain, pg);
count++;
/* Arbitrarily preempt every 512 iterations */
- if ( !(count % 512) && hypercall_preempt_check() )
+ if ( allow_preemption && !(count % 512) && hypercall_preempt_check() )
{
rc = -ERESTART;
break;
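
The hunk above is the heart of the preemptible teardown: free one page-table
page per iteration and, when preemption is allowed, poll for pending work every
512 pages so that tearing down a huge P2M cannot monopolise the CPU. A
standalone sketch of that batching pattern (the list type and helpers are
stand-ins, not Xen code):

#include <stdbool.h>
#include <stddef.h>

#ifndef ERESTART
#define ERESTART 85
#endif

struct page { struct page *next; };

/* Stand-in for hypercall_preempt_check(): "is other work pending?" */
static bool preempt_check(void) { return false; }
static void free_one_page(struct page *pg) { (void)pg; /* would free it */ }

static int teardown_pages(struct page **list, bool allow_preemption)
{
    unsigned long count = 0;
    struct page *pg;

    while ( (pg = *list) != NULL )
    {
        *list = pg->next;
        free_one_page(pg);
        count++;

        /* Arbitrarily preempt every 512 iterations, as the patch does. */
        if ( allow_preemption && !(count % 512) && preempt_check() )
            return -ERESTART;        /* caller re-invokes to continue */
    }

    return 0;
}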
@@ -1680,7 +1683,20 @@ void p2m_final_teardown(struct domain *d)
if ( !p2m->domain )
return;
+ /*
+ * No need to call relinquish_p2m_mapping() here because
+ * p2m_final_teardown() is called either after domain_relinquish_resources(),
+ * where relinquish_p2m_mapping() has been called, or from the failure path
+ * of domain_create()/arch_domain_create(), where mappings that require
+ * p2m_put_l3_page() should never be created. For the latter case, also see
+ * the comment on top of p2m_set_entry() for more info.
+ */
+
+ BUG_ON(p2m_teardown(d, false));
ASSERT(page_list_empty(&p2m->pages));
+
+ while ( p2m_teardown_allocation(d) == -ERESTART )
+ continue; /* No preemption support here */
ASSERT(page_list_empty(&d->arch.paging.p2m_freelist));
if ( p2m->root )
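
Note the drain loop above: p2m_teardown_allocation() may return -ERESTART to
ask to be restarted, but p2m_final_teardown() runs where preemption is
impossible, so the return value is swallowed and the call is simply retried
until the pool is empty. A compressed standalone model of that retry idiom
(the helper below is a stand-in, not Xen code):

#ifndef ERESTART
#define ERESTART 85
#endif

/* Stand-in: pretend three passes are needed before the pool is drained. */
static int teardown_allocation(void)
{
    static int passes_left = 3;
    return ( --passes_left > 0 ) ? -ERESTART : 0;
}

static void final_teardown(void)
{
    /* No preemption support here: loop until the work really is done. */
    while ( teardown_allocation() == -ERESTART )
        continue;
}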
@@ -1747,6 +1763,20 @@ int p2m_init(struct domain *d)
if ( rc )
return rc;
+ /*
+ * Hardware using GICv2 needs to create a P2M mapping of the 8KB GICv2
+ * area when the domain is created. To cover the worst case for the
+ * page tables and keep a buffer, populate 16 pages in the P2M pages
+ * pool here. For GICv3 the above-mentioned P2M mapping is not
+ * necessary, but since the 16 pages allocated here are not lost,
+ * populate them unconditionally.
+ */
+ spin_lock(&d->arch.paging.lock);
+ rc = p2m_set_allocation(d, 16, NULL);
+ spin_unlock(&d->arch.paging.lock);
+ if ( rc )
+ return rc;
+
return 0;
}
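
p2m_set_allocation(), called above under d->arch.paging.lock, resizes the
per-domain P2M page pool toward a target number of pages. A toy model of that
resize-to-target idea (assumed shape for illustration; not the Xen
implementation):

#include <stdlib.h>

#define POOL_MAX 64

struct pool { unsigned int count; void *pages[POOL_MAX]; };

/* Grow or shrink the pool until it holds exactly `target` pages. */
static int pool_set_allocation(struct pool *p, unsigned int target)
{
    if ( target > POOL_MAX )
        return -1;

    while ( p->count < target )
    {
        void *pg = calloc(1, 4096);   /* stand-in for alloc_domheap_page() */
        if ( pg == NULL )
            return -1;                /* -ENOMEM in the real code */
        p->pages[p->count++] = pg;
    }

    while ( p->count > target )
        free(p->pages[--p->count]);   /* stand-in for free_domheap_page() */

    return 0;
}

With this model, pool_set_allocation(&pool, 16) mirrors the 16-page populate
performed in the hunk above.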
@@ -173,14 +173,18 @@ int p2m_init(struct domain *d);
/*
* The P2M resources are freed in two parts:
- * - p2m_teardown() will be called when relinquish the resources. It
- * will free large resources (e.g. intermediate page-tables) that
- * requires preemption.
+ * - p2m_teardown() will be called preemptively when relinquishing the
+ * resources, in which case it will free large resources (e.g. intermediate
+ * page-tables) whose freeing requires preemption.
* - p2m_final_teardown() will be called when the domain struct is being
* freed. This *cannot* be preempted and therefore only small
* resources should be freed here.
+ * Note that p2m_final_teardown() will also call p2m_teardown(), to properly
+ * free the P2M when domain creation fails with P2M pages already in use. In
+ * that case p2m_teardown() is called non-preemptively and will always
+ * return 0.
*/
-int p2m_teardown(struct domain *d);
+int p2m_teardown(struct domain *d, bool allow_preemption);
void p2m_final_teardown(struct domain *d);
/*
@@ -245,6 +249,8 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
/*
* Direct set a p2m entry: only for use by the P2M code.
* The P2M write lock should be taken.
+ * TODO: Add a check in __p2m_set_entry() to avoid creating, during
+ * arch_domain_create(), a mapping whose removal would require
+ * p2m_put_l3_page() to be called.
*/
int p2m_set_entry(struct p2m_domain *p2m,
gfn_t sgfn,