--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -692,8 +692,7 @@ int map_ldt_shadow_page(unsigned int off)
return 0;
}

- nl1e = l1e_from_pfn(mfn_x(page_to_mfn(page)),
- l1e_get_flags(l1e) | _PAGE_RW);
+ nl1e = l1e_from_page(page, l1e_get_flags(l1e) | _PAGE_RW);

spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
l1e_write(&gdt_ldt_ptes(d, v)[off + 16], nl1e);
@@ -1315,7 +1314,7 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)

if ( l2e_get_flags(l2e) & _PAGE_PSE )
{
- struct page_info *page = mfn_to_page(_mfn(l2e_get_pfn(l2e)));
+ struct page_info *page = l2e_get_page(l2e);
unsigned int i;

for ( i = 0; i < (1u << PAGETABLE_ORDER); i++, page++ )
@@ -1944,7 +1943,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), NULL, P2M_ALLOC);
if ( !page )
return -EINVAL;
- nl1e = l1e_from_pfn(mfn_x(page_to_mfn(page)), l1e_get_flags(nl1e));
+ nl1e = l1e_from_page(page, l1e_get_flags(nl1e));
}

/* Fast path for sufficiently-similar mappings. */
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -405,8 +405,7 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)

/* Install the per-domain mappings for this domain */
l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_pfn(mfn_x(page_to_mfn(d->arch.perdomain_l3_pg)),
- __PAGE_HYPERVISOR_RW);
+ l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);

/* Install a linear mapping */
l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -168,7 +168,7 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
unmap_domain_page(l3_table);
}

- p2m_free_ptp(p2m, mfn_to_page(_mfn(l1e_get_pfn(*p2m_entry))));
+ p2m_free_ptp(p2m, l1e_get_page(*p2m_entry));
}

// Walk one level of the P2M table, allocating a new table if required.
@@ -210,8 +210,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
if ( pg == NULL )
return -ENOMEM;

- new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
- P2M_BASE_FLAGS | _PAGE_RW);
+ new_entry = l1e_from_page(pg, P2M_BASE_FLAGS | _PAGE_RW);

switch ( type ) {
case PGT_l3_page_table:
@@ -255,8 +254,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
}
unmap_domain_page(l1_entry);
- new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
- P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
+ new_entry = l1e_from_page(pg, P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
}
@@ -290,8 +288,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
}

unmap_domain_page(l1_entry);
- new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
- P2M_BASE_FLAGS | _PAGE_RW);
+ new_entry = l1e_from_page(pg, P2M_BASE_FLAGS | _PAGE_RW);
p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
}
No functional change (confirmed by diffing the disassembly).

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/mm.c         |  7 +++----
 xen/arch/x86/mm/hap/hap.c |  3 +--
 xen/arch/x86/mm/p2m-pt.c  | 11 ++++-------
 3 files changed, 8 insertions(+), 13 deletions(-)
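
For anyone wanting to convince themselves of the "no functional change" claim without diffing the disassembly: the l?e_from_page() / l?e_get_page() helpers are assumed here to be thin macro wrappers around exactly the open-coded page_to_mfn()/mfn_to_page() forms being replaced (the real definitions live in the x86 page.h). The stand-alone sketch below models that relationship with stub types; the type and macro bodies are illustrative stand-ins, not Xen's actual ones.

/*
 * Toy model only -- NOT Xen's real headers.  It sketches the assumed
 * relationship between the open-coded form being removed and the
 * l1e_from_page() helper used instead, which is why the substitution is
 * expected to produce identical code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { unsigned long mfn; } mfn_t;      /* typesafe MFN wrapper */
typedef struct { uint64_t l1; } l1_pgentry_t;     /* PTE wrapper          */
struct page_info { int dummy; };                  /* frame metadata stub  */

static struct page_info frame_table[16];

#define mfn_x(m)        ((m).mfn)
#define page_to_mfn(pg) ((mfn_t){ (unsigned long)((pg) - frame_table) })

#define l1e_from_pfn(pfn, flags) \
    ((l1_pgentry_t){ ((uint64_t)(pfn) << 12) | (flags) })

/* Assumed shape of the helper the call sites switch to. */
#define l1e_from_page(pg, flags) l1e_from_pfn(mfn_x(page_to_mfn(pg)), flags)

int main(void)
{
    struct page_info *page = &frame_table[5];
    unsigned int flags = 0x3;                     /* e.g. Present|RW */

    /* The form removed by the patch vs. the helper it is replaced with. */
    l1_pgentry_t e_old = l1e_from_pfn(mfn_x(page_to_mfn(page)), flags);
    l1_pgentry_t e_new = l1e_from_page(page, flags);

    assert(e_old.l1 == e_new.l1); /* same encoding => no functional change */
    printf("both forms encode %#llx\n", (unsigned long long)e_old.l1);
    return 0;
}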