--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -569,7 +569,7 @@ int p2m_set_entry(struct p2m_domain *p2m
     return rc;
 }
 
-struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
 {
     struct page_info *pg;
 
@@ -577,13 +577,13 @@ struct page_info *p2m_alloc_ptp(struct p
     ASSERT(p2m->domain);
     ASSERT(p2m->domain->arch.paging.alloc_page);
     pg = p2m->domain->arch.paging.alloc_page(p2m->domain);
-    if (pg == NULL)
-        return NULL;
+    if ( !pg )
+        return INVALID_MFN;
 
     page_list_add_tail(pg, &p2m->pages);
     pg->u.inuse.type_info = type | 1 | PGT_validated;
 
-    return pg;
+    return page_to_mfn(pg);
 }
 
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
@@ -609,7 +609,7 @@ void p2m_free_ptp(struct p2m_domain *p2m
  */
 int p2m_alloc_table(struct p2m_domain *p2m)
 {
-    struct page_info *p2m_top;
+    mfn_t top_mfn;
     struct domain *d = p2m->domain;
     int rc = 0;
 
@@ -632,14 +632,14 @@ int p2m_alloc_table(struct p2m_domain *p
     P2M_PRINTK("allocating p2m table\n");
 
-    p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table);
-    if ( p2m_top == NULL )
+    top_mfn = p2m_alloc_ptp(p2m, PGT_l4_page_table);
+    if ( mfn_eq(top_mfn, INVALID_MFN) )
     {
         p2m_unlock(p2m);
         return -ENOMEM;
     }
 
-    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
+    p2m->phys_table = pagetable_from_mfn(top_mfn);
 
     if ( hap_enabled(d) )
         iommu_share_p2m_table(d);
 
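The net effect in p2m.c: allocation failure is now signalled by INVALID_MFN
instead of a NULL page pointer. The pattern can be modelled out of tree;
below is a minimal, self-contained sketch using hypothetical stand-ins for
Xen's typesafe mfn_t helpers (not the in-tree definitions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins modelled on Xen's typesafe MFN wrappers. */
    typedef struct { uint64_t m; } mfn_t;
    #define _mfn(v)     ((mfn_t){ (v) })
    #define mfn_x(m)    ((m).m)
    #define INVALID_MFN _mfn(~0ULL)

    static bool mfn_eq(mfn_t a, mfn_t b) { return mfn_x(a) == mfn_x(b); }

    /* Toy allocator following the patched contract: INVALID_MFN,
     * not a NULL pointer, signals failure. */
    static mfn_t alloc_ptp(bool fail)
    {
        return fail ? INVALID_MFN : _mfn(0x1234);
    }

    int main(void)
    {
        mfn_t mfn = alloc_ptp(false);

        if ( mfn_eq(mfn, INVALID_MFN) )
            return 1;           /* callers map this to -ENOMEM */

        printf("got mfn %#llx\n", (unsigned long long)mfn_x(mfn));
        return 0;
    }
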
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -225,16 +225,16 @@ static void ept_p2m_type_to_flags(struct
 /* Fill in middle levels of ept table */
 static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
 {
-    struct page_info *pg;
+    mfn_t mfn;
     ept_entry_t *table;
     unsigned int i;
 
-    pg = p2m_alloc_ptp(p2m, 0);
-    if ( pg == NULL )
+    mfn = p2m_alloc_ptp(p2m, 0);
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return 0;
 
     ept_entry->epte = 0;
-    ept_entry->mfn = page_to_mfn(pg);
+    ept_entry->mfn = mfn_x(mfn);
     ept_entry->access = p2m->default_access;
 
     ept_entry->r = ept_entry->w = ept_entry->x = 1;
@@ -243,7 +243,7 @@ static int ept_set_middle_entry(struct p
     ept_entry->suppress_ve = 1;
 
-    table = __map_domain_page(pg);
+    table = map_domain_page(mfn);
 
     for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
         table[i].suppress_ve = 1;
 
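The __map_domain_page(pg) -> map_domain_page(mfn) switch is safe because, as
far as I know, __map_domain_page() is merely a wrapper that does
map_domain_page(page_to_mfn(pg)), so callers already holding an mfn_t can
take the direct route. To make the middle-entry fill concrete, here is a
compilable toy of the pattern, with a mock ept_entry_t whose field widths
are invented for illustration (the real layout lives in the EPT headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Mock ept_entry_t: field names match the hunk above, but the
     * widths and positions here are made up for illustration. */
    typedef union {
        uint64_t epte;
        struct {
            uint64_t r:1, w:1, x:1;
            uint64_t pad:9;
            uint64_t mfn:40;
        };
    } ept_entry_t;

    int main(void)
    {
        ept_entry_t e;

        e.epte = 0;           /* start from a clean entry */
        e.mfn = 0x1234;       /* frame of the next-level table */
        e.r = e.w = e.x = 1;  /* middle entries permit full access */

        printf("epte = %#llx\n", (unsigned long long)e.epte);  /* 0x1234007 */
        return 0;
    }
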
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -204,14 +204,12 @@ p2m_next_level(struct p2m_domain *p2m, v
     /* PoD/paging: Not present doesn't imply empty. */
     if ( !flags )
     {
-        struct page_info *pg;
+        mfn_t mfn = p2m_alloc_ptp(p2m, type);
 
-        pg = p2m_alloc_ptp(p2m, type);
-        if ( pg == NULL )
+        if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW);
+        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
 
         switch ( type ) {
         case PGT_l3_page_table:
@@ -235,7 +233,7 @@ p2m_next_level(struct p2m_domain *p2m, v
     {
         /* Split superpages pages into smaller ones. */
         unsigned long pfn = l1e_get_pfn(*p2m_entry);
-        struct page_info *pg;
+        mfn_t mfn;
         l1_pgentry_t *l1_entry;
         unsigned int i, level;
 
@@ -263,11 +261,11 @@ p2m_next_level(struct p2m_domain *p2m, v
             return -EINVAL;
         }
 
-        pg = p2m_alloc_ptp(p2m, type);
-        if ( pg == NULL )
+        mfn = p2m_alloc_ptp(p2m, type);
+        if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
-        l1_entry = __map_domain_page(pg);
+        l1_entry = map_domain_page(mfn);
 
         /* Inherit original IOMMU permissions, but update Next Level. */
         if ( iommu_hap_pt_share )
@@ -285,8 +283,7 @@ p2m_next_level(struct p2m_domain *p2m, v
         unmap_domain_page(l1_entry);
 
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW);
+        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
 
         p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
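In p2m-pt.c the only conversion left at each site is mfn_t -> raw frame
number for l1e_from_pfn(). A toy model of that entry construction, with
invented constants rather than Xen's real l1e encoding helpers:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define _PAGE_PRESENT 0x001ULL
    #define _PAGE_RW      0x002ULL

    /* Toy analogue of l1e_from_pfn(): put the frame number in the
     * address bits and OR in the access flags. */
    static uint64_t l1e_from_pfn(uint64_t pfn, uint64_t flags)
    {
        return (pfn << PAGE_SHIFT) | flags;
    }

    int main(void)
    {
        uint64_t e = l1e_from_pfn(0x1234, _PAGE_PRESENT | _PAGE_RW);

        printf("l1e = %#llx\n", (unsigned long long)e);  /* 0x1234003 */
        return 0;
    }
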
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -684,7 +684,7 @@ void p2m_mem_paging_resume(struct domain
  * Internal functions, only called by other p2m code
  */
 
-struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
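
For reference when cross-checking callers against the new prototype: the
sentinel and comparison helper used throughout are, if memory serves,
provided by xen/include/xen/mm.h roughly as sketched below (the typesafe
build wraps the value in a one-member struct, which also turns any leftover
pointer-style NULL check into a compile error):

    /* Assumed in-tree shape (non-typesafe configuration shown): */
    #define INVALID_MFN  _mfn(~0UL)

    static inline bool mfn_eq(mfn_t x, mfn_t y)
    {
        return mfn_x(x) == mfn_x(y);
    }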