@@ -20,10 +20,6 @@
#include <asm/kernel.h>
#include <asm/domain_build.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
#define ACPI_DOM0_FDT_MIN_SIZE 4096
static int __init acpi_iomem_deny_access(struct domain *d)
@@ -32,10 +32,6 @@
#include <asm/insn.h>
#include <asm/page.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
struct alt_region {
@@ -14,10 +14,6 @@
#include <asm/insn.h>
#include <asm/psci.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
/* Hardening Branch predictor code for Arm64 */
#ifdef CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR
@@ -52,10 +52,6 @@ struct map_range_data
p2m_type_t p2mt;
};
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
//#define DEBUG_11_ALLOCATION
#ifdef DEBUG_11_ALLOCATION
# define D11PRINT(fmt, args...) printk(XENLOG_DEBUG fmt, ##args)
@@ -12,10 +12,6 @@
#include <asm/livepatch.h>
#include <asm/mm.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
void *vmap_of_xen_text;
int arch_livepatch_safety_check(void)
@@ -43,12 +43,6 @@
#include <asm/setup.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef mfn_to_virt
-#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
-
#ifdef NDEBUG
static inline void
__attribute__ ((__format__ (__printf__, 1, 2)))
@@ -835,7 +829,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
* Virtual address aligned to previous 1GB to match physical
* address alignment done above.
*/
- vaddr = (vaddr_t)__mfn_to_virt(base_mfn) & FIRST_MASK;
+ vaddr = (vaddr_t)mfn_to_virt(_mfn(base_mfn)) & FIRST_MASK;
while ( mfn < end_mfn )
{
@@ -78,17 +78,17 @@ void *map_domain_page(mfn_t mfn)
#ifdef NDEBUG
if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
- return mfn_to_virt(mfn_x(mfn));
+ return mfn_to_virt(mfn);
#endif
v = mapcache_current_vcpu();
if ( !v || !is_pv_vcpu(v) )
- return mfn_to_virt(mfn_x(mfn));
+ return mfn_to_virt(mfn);
dcache = &v->domain->arch.pv.mapcache;
vcache = &v->arch.pv.mapcache;
if ( !dcache->inuse )
- return mfn_to_virt(mfn_x(mfn));
+ return mfn_to_virt(mfn);
perfc_incr(map_domain_page_count);
@@ -311,7 +311,7 @@ void *map_domain_page_global(mfn_t mfn)
#ifdef NDEBUG
if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
- return mfn_to_virt(mfn_x(mfn));
+ return mfn_to_virt(mfn);
#endif
return vmap(&mfn, 1);
@@ -336,7 +336,7 @@ mfn_t domain_page_map_to_mfn(const void *ptr)
const l1_pgentry_t *pl1e;
if ( va >= DIRECTMAP_VIRT_START )
- return _mfn(virt_to_mfn(ptr));
+ return virt_to_mfn(ptr);
if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
{
@@ -444,31 +444,32 @@ static int __init pvh_populate_p2m(struct domain *d)
/* Populate memory map. */
for ( i = 0; i < d->arch.nr_e820; i++ )
{
- unsigned long addr, size;
+ mfn_t mfn;
+ unsigned long size;
if ( d->arch.e820[i].type != E820_RAM )
continue;
- addr = PFN_DOWN(d->arch.e820[i].addr);
+ mfn = maddr_to_mfn(d->arch.e820[i].addr);
size = PFN_DOWN(d->arch.e820[i].size);
- rc = pvh_populate_memory_range(d, addr, size);
+ rc = pvh_populate_memory_range(d, mfn_x(mfn), size);
if ( rc )
return rc;
- if ( addr < MB1_PAGES )
+ if ( mfn_x(mfn) < MB1_PAGES )
{
uint64_t end = min_t(uint64_t, MB(1),
d->arch.e820[i].addr + d->arch.e820[i].size);
enum hvm_translation_result res =
- hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)),
- mfn_to_virt(addr),
+ hvm_copy_to_guest_phys(mfn_to_maddr(mfn),
+ mfn_to_virt(mfn),
end - d->arch.e820[i].addr,
v);
if ( res != HVMTRANS_okay )
- printk("Failed to copy [%#lx, %#lx): %d\n",
- addr, addr + size, res);
+ printk("Failed to copy [%"PRI_mfn", %"PRI_mfn"): %d\n",
+ mfn_x(mfn), mfn_x(mfn_add(mfn, size)), res);
}
}
@@ -607,7 +608,8 @@ static int __init pvh_load_kernel(struct domain *d, const module_t *image,
if ( initrd != NULL )
{
- rc = hvm_copy_to_guest_phys(last_addr, mfn_to_virt(initrd->mod_start),
+ rc = hvm_copy_to_guest_phys(last_addr,
+ mfn_to_virt(_mfn(initrd->mod_start)),
initrd->mod_end, v);
if ( rc )
{
@@ -152,10 +152,6 @@
#include "pv/mm.h"
#endif
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(v) _mfn(__virt_to_mfn(v))
-
/* Mapping of the fixmap space needed early. */
l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
l1_fixmap[L1_PAGETABLE_ENTRIES];
@@ -323,8 +319,8 @@ void __init arch_init_memory(void)
iostart_pfn = max_t(unsigned long, pfn, 1UL << (20 - PAGE_SHIFT));
ioend_pfn = min(rstart_pfn, 16UL << (20 - PAGE_SHIFT));
if ( iostart_pfn < ioend_pfn )
- destroy_xen_mappings((unsigned long)mfn_to_virt(iostart_pfn),
- (unsigned long)mfn_to_virt(ioend_pfn));
+ destroy_xen_mappings((unsigned long)mfn_to_virt(_mfn(iostart_pfn)),
+ (unsigned long)mfn_to_virt(_mfn(ioend_pfn)));
/* Mark as I/O up to next RAM region. */
for ( ; pfn < rstart_pfn; pfn++ )
@@ -785,21 +781,21 @@ bool is_iomem_page(mfn_t mfn)
return (page_get_owner(page) == dom_io);
}
-static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
+static int update_xen_mappings(mfn_t mfn, unsigned int cacheattr)
{
int err = 0;
- bool alias = mfn >= PFN_DOWN(xen_phys_start) &&
- mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
+ bool alias = mfn_x(mfn) >= PFN_DOWN(xen_phys_start) &&
+ mfn_x(mfn) < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
unsigned long xen_va =
- XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
+ XEN_VIRT_START + mfn_to_maddr(mfn_add(mfn, -PFN_DOWN(xen_phys_start)));
if ( unlikely(alias) && cacheattr )
- err = map_pages_to_xen(xen_va, _mfn(mfn), 1, 0);
+ err = map_pages_to_xen(xen_va, mfn, 1, 0);
if ( !err )
- err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), _mfn(mfn), 1,
+ err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
if ( unlikely(alias) && !cacheattr && !err )
- err = map_pages_to_xen(xen_va, _mfn(mfn), 1, PAGE_HYPERVISOR);
+ err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
return err;
}
@@ -1029,7 +1025,7 @@ get_page_from_l1e(
nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
} while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
- err = update_xen_mappings(mfn, cacheattr);
+ err = update_xen_mappings(_mfn(mfn), cacheattr);
if ( unlikely(err) )
{
cacheattr = y & PGC_cacheattr_mask;
@@ -2449,7 +2445,7 @@ static int cleanup_page_mappings(struct page_info *page)
BUG_ON(is_xen_heap_page(page));
- rc = update_xen_mappings(mfn, 0);
+ rc = update_xen_mappings(_mfn(mfn), 0);
}
/*
@@ -4950,7 +4946,7 @@ void *alloc_xen_pagetable(void)
{
mfn_t mfn = alloc_xen_pagetable_new();
- return mfn_eq(mfn, INVALID_MFN) ? NULL : mfn_to_virt(mfn_x(mfn));
+ return mfn_eq(mfn, INVALID_MFN) ? NULL : mfn_to_virt(mfn);
}
void free_xen_pagetable(void *v)
@@ -4983,7 +4979,7 @@ mfn_t alloc_xen_pagetable_new(void)
void free_xen_pagetable_new(mfn_t mfn)
{
if ( system_state != SYS_STATE_early_boot && !mfn_eq(mfn, INVALID_MFN) )
- free_xenheap_page(mfn_to_virt(mfn_x(mfn)));
+ free_xenheap_page(mfn_to_virt(mfn));
}
static DEFINE_SPINLOCK(map_pgdir_lock);
@@ -100,14 +100,12 @@ static int __init populate_memnodemap(const struct node *nodes,
static int __init allocate_cachealigned_memnodemap(void)
{
unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
- unsigned long mfn = mfn_x(alloc_boot_pages(size, 1));
+ mfn_t mfn = alloc_boot_pages(size, 1);
memnodemap = mfn_to_virt(mfn);
- mfn <<= PAGE_SHIFT;
- size <<= PAGE_SHIFT;
printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
- mfn, mfn + size);
- memnodemapsize = size / sizeof(*memnodemap);
+ mfn_to_maddr(mfn), mfn_to_maddr(mfn_add(mfn, size)));
+ memnodemapsize = (size << PAGE_SHIFT) / sizeof(*memnodemap);
return 0;
}
@@ -76,7 +76,7 @@ bool pv_destroy_ldt(struct vcpu *v)
void pv_destroy_gdt(struct vcpu *v)
{
l1_pgentry_t *pl1e = pv_gdt_ptes(v);
- mfn_t zero_mfn = _mfn(virt_to_mfn(zero_page));
+ mfn_t zero_mfn = virt_to_mfn(zero_page);
l1_pgentry_t zero_l1e = l1e_from_mfn(zero_mfn, __PAGE_HYPERVISOR_RO);
unsigned int i;
@@ -523,7 +523,7 @@ int __init dom0_construct_pv(struct domain *d,
free_domheap_pages(page, order);
page += 1UL << order;
}
- memcpy(page_to_virt(page), mfn_to_virt(initrd->mod_start),
+ memcpy(page_to_virt(page), mfn_to_virt(_mfn(initrd->mod_start)),
initrd_len);
mpt_alloc = (paddr_t)initrd->mod_start << PAGE_SHIFT;
init_domheap_pages(mpt_alloc,
@@ -601,7 +601,7 @@ int __init dom0_construct_pv(struct domain *d,
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
clear_page(l4tab);
- init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
+ init_xen_l4_slots(l4tab, virt_to_mfn(l4start),
d, INVALID_MFN, true);
v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
}
@@ -39,9 +39,6 @@
#include <compat/grant_table.h>
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
#ifdef CONFIG_PV_SHIM_EXCLUSIVE
/* Tolerate "pv-shim" being passed to a CONFIG_PV_SHIM_EXCLUSIVE hypervisor. */
ignore_param("pv-shim");
@@ -340,7 +340,7 @@ void *__init bootstrap_map(const module_t *mod)
void *ret;
if ( system_state != SYS_STATE_early_boot )
- return mod ? mfn_to_virt(mod->mod_start) : NULL;
+ return mod ? mfn_to_virt(_mfn(mod->mod_start)) : NULL;
if ( !mod )
{
@@ -1005,7 +1005,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
* This needs to remain in sync with xen_in_range() and the
* respective reserve_e820_ram() invocation below.
*/
- mod[mbi->mods_count].mod_start = virt_to_mfn(_stext);
+ mod[mbi->mods_count].mod_start = mfn_x(virt_to_mfn(_stext));
mod[mbi->mods_count].mod_end = __2M_rwdata_end - _stext;
}
@@ -1404,7 +1404,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
{
set_pdx_range(mod[i].mod_start,
mod[i].mod_start + PFN_UP(mod[i].mod_end));
- map_pages_to_xen((unsigned long)mfn_to_virt(mod[i].mod_start),
+ map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(mod[i].mod_start)),
_mfn(mod[i].mod_start),
PFN_UP(mod[i].mod_end), PAGE_HYPERVISOR);
}
@@ -1494,9 +1494,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
numa_initmem_init(0, raw_max_page);
- if ( max_page - 1 > virt_to_mfn(HYPERVISOR_VIRT_END - 1) )
+ if ( max_page - 1 > mfn_x(virt_to_mfn(HYPERVISOR_VIRT_END - 1)) )
{
- unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
+ unsigned long limit = mfn_x(virt_to_mfn(HYPERVISOR_VIRT_END - 1));
uint64_t mask = PAGE_SIZE - 1;
if ( !highmem_start )
@@ -996,7 +996,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
goto out;
per_cpu(gdt, cpu) = gdt;
per_cpu(gdt_l1e, cpu) =
- l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+ l1e_from_mfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
memcpy(gdt, boot_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
BUILD_BUG_ON(NR_CPUS > 0x10000);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
@@ -1005,7 +1005,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
if ( gdt == NULL )
goto out;
per_cpu(compat_gdt_l1e, cpu) =
- l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+ l1e_from_mfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
memcpy(gdt, boot_compat_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
@@ -196,7 +196,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
return;
}
mfn = alloc_boot_pages(PFN_UP(slit->header.length), 1);
- acpi_slit = mfn_to_virt(mfn_x(mfn));
+ acpi_slit = mfn_to_virt(mfn);
memcpy(acpi_slit, slit, slit->header.length);
}
@@ -260,7 +260,7 @@ static int mfn_in_guarded_stack(unsigned long mfn)
continue;
p = (void *)((unsigned long)stack_base[i] + STACK_SIZE -
PRIMARY_STACK_SIZE - PAGE_SIZE);
- if ( mfn == virt_to_mfn(p) )
+ if ( mfn_eq(_mfn(mfn), virt_to_mfn(p)) )
return -1;
}
@@ -296,7 +296,7 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
if ( mfn_in_guarded_stack(mfn) )
continue; /* skip guard stack, see memguard_guard_stack() in mm.c */
- pg = mfn_to_virt(mfn);
+ pg = mfn_to_virt(_mfn(mfn));
vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx);
}
}
@@ -2029,9 +2029,9 @@ void __init trap_init(void)
/* Cache {,compat_}gdt_l1e now that physical relocation is done. */
this_cpu(gdt_l1e) =
- l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
+ l1e_from_mfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
this_cpu(compat_gdt_l1e) =
- l1e_from_pfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
+ l1e_from_mfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
percpu_traps_init();
@@ -1369,11 +1369,12 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
return -EINVAL;
}
- i = virt_to_mfn(HYPERVISOR_VIRT_END - 1) + 1;
+ i = mfn_x(virt_to_mfn(HYPERVISOR_VIRT_END - 1)) + 1;
if ( spfn < i )
{
- ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), _mfn(spfn),
- min(epfn, i) - spfn, PAGE_HYPERVISOR);
+ ret = map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(spfn)),
+ _mfn(spfn), min(epfn, i) - spfn,
+ PAGE_HYPERVISOR);
if ( ret )
goto destroy_directmap;
}
@@ -1381,7 +1382,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
{
if ( i < spfn )
i = spfn;
- ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), _mfn(i),
+ ret = map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(i)), _mfn(i),
epfn - i, __PAGE_HYPERVISOR_RW);
if ( ret )
goto destroy_directmap;
@@ -1473,8 +1474,8 @@ destroy_frametable:
NODE_DATA(node)->node_start_pfn = old_node_start;
NODE_DATA(node)->node_spanned_pages = old_node_span;
destroy_directmap:
- destroy_xen_mappings((unsigned long)mfn_to_virt(spfn),
- (unsigned long)mfn_to_virt(epfn));
+ destroy_xen_mappings((unsigned long)mfn_to_virt(_mfn(spfn)),
+ (unsigned long)mfn_to_virt(_mfn(epfn)));
return ret;
}
@@ -196,7 +196,8 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
info->outstanding_pages = d->outstanding_pages;
info->shr_pages = atomic_read(&d->shr_pages);
info->paged_pages = atomic_read(&d->paged_pages);
- info->shared_info_frame = mfn_to_gmfn(d, virt_to_mfn(d->shared_info));
+ info->shared_info_frame = mfn_to_gmfn(d,
+ mfn_x(virt_to_mfn(d->shared_info)));
BUG_ON(SHARED_M2P(info->shared_info_frame));
info->cpupool = cpupool_get_id(d);
@@ -1447,7 +1447,7 @@ static __init void copy_mapping(unsigned long mfn, unsigned long end,
{
l4_pgentry_t l4e = efi_l4_pgtable[l4_table_offset(mfn << PAGE_SHIFT)];
l3_pgentry_t *l3src, *l3dst;
- unsigned long va = (unsigned long)mfn_to_virt(mfn);
+ unsigned long va = (unsigned long)mfn_to_virt(_mfn(mfn));
next = mfn + (1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT));
if ( !is_valid(mfn, min(next, end)) )
@@ -1562,9 +1562,10 @@ void __init efi_init_memory(void)
!(smfn & pfn_hole_mask) &&
!((smfn ^ (emfn - 1)) & ~pfn_pdx_bottom_mask) )
{
- if ( (unsigned long)mfn_to_virt(emfn - 1) >= HYPERVISOR_VIRT_END )
+ if ( (unsigned long)mfn_to_virt(_mfn(emfn - 1)) >=
+ HYPERVISOR_VIRT_END )
prot &= ~_PAGE_GLOBAL;
- if ( map_pages_to_xen((unsigned long)mfn_to_virt(smfn),
+ if ( map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(smfn)),
_mfn(smfn), emfn - smfn, prot) == 0 )
desc->VirtualStart =
(unsigned long)maddr_to_virt(desc->PhysicalStart);
@@ -3935,8 +3935,8 @@ static int gnttab_get_status_frame_mfn(struct domain *d,
}
/* Make sure idx is bounded wrt nr_status_frames */
- *mfn = _mfn(virt_to_mfn(
- gt->status[array_index_nospec(idx, nr_status_frames(gt))]));
+ *mfn = virt_to_mfn(
+ gt->status[array_index_nospec(idx, nr_status_frames(gt))]);
return 0;
}
@@ -3966,8 +3966,8 @@ static int gnttab_get_shared_frame_mfn(struct domain *d,
}
/* Make sure idx is bounded wrt nr_grant_frames */
- *mfn = _mfn(virt_to_mfn(
- gt->shared_raw[array_index_nospec(idx, nr_grant_frames(gt))]));
+ *mfn = virt_to_mfn(
+ gt->shared_raw[array_index_nospec(idx, nr_grant_frames(gt))]);
return 0;
}
@@ -565,7 +565,7 @@ static unsigned int __read_mostly xenheap_bits;
#define xenheap_bits 0
#endif
-static unsigned long init_node_heap(int node, unsigned long mfn,
+static unsigned long init_node_heap(int node, mfn_t mfn,
unsigned long nr, bool *use_tail)
{
/* First node to be discovered has its heap metadata statically alloced. */
@@ -584,21 +584,21 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
needed = 0;
}
else if ( *use_tail && nr >= needed &&
- arch_mfn_in_directmap(mfn + nr) &&
+ arch_mfn_in_directmap(mfn_x(mfn_add(mfn, nr))) &&
(!xenheap_bits ||
- !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
+ !((mfn_x(mfn) + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{
- _heap[node] = mfn_to_virt(mfn + nr - needed);
- avail[node] = mfn_to_virt(mfn + nr - 1) +
+ _heap[node] = mfn_to_virt(mfn_add(mfn, nr - needed));
+ avail[node] = mfn_to_virt(mfn_add(mfn, nr - 1)) +
PAGE_SIZE - sizeof(**avail) * NR_ZONES;
}
else if ( nr >= needed &&
- arch_mfn_in_directmap(mfn + needed) &&
+ arch_mfn_in_directmap(mfn_x(mfn_add(mfn, needed))) &&
(!xenheap_bits ||
- !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
+ !((mfn_x(mfn) + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{
_heap[node] = mfn_to_virt(mfn);
- avail[node] = mfn_to_virt(mfn + needed - 1) +
+ avail[node] = mfn_to_virt(mfn_add(mfn, needed - 1)) +
PAGE_SIZE - sizeof(**avail) * NR_ZONES;
*use_tail = false;
}
@@ -1809,7 +1809,7 @@ static void init_heap_pages(
(find_first_set_bit(e) <= find_first_set_bit(s));
unsigned long n;
- n = init_node_heap(nid, mfn_x(page_to_mfn(pg + i)), nr_pages - i,
+ n = init_node_heap(nid, page_to_mfn(pg + i), nr_pages - i,
&use_tail);
BUG_ON(i + n > nr_pages);
if ( n && !use_tail )
@@ -218,7 +218,7 @@ static int alloc_trace_bufs(unsigned int pages)
t_info_mfn_list[offset + i] = 0;
goto out_dealloc;
}
- t_info_mfn_list[offset + i] = virt_to_mfn(p);
+ t_info_mfn_list[offset + i] = mfn_x(virt_to_mfn(p));
}
}
@@ -234,7 +234,8 @@ static int alloc_trace_bufs(unsigned int pages)
offset = t_info->mfn_offset[cpu];
/* Initialize the buffer metadata */
- per_cpu(t_bufs, cpu) = buf = mfn_to_virt(t_info_mfn_list[offset]);
+ buf = mfn_to_virt(_mfn(t_info_mfn_list[offset]));
+ per_cpu(t_bufs, cpu) = buf;
buf->cons = buf->prod = 0;
printk(XENLOG_INFO "xentrace: p%d mfn %x offset %u\n",
@@ -269,10 +270,10 @@ out_dealloc:
continue;
for ( i = 0; i < pages; i++ )
{
- uint32_t mfn = t_info_mfn_list[offset + i];
- if ( !mfn )
+ mfn_t mfn = _mfn(t_info_mfn_list[offset + i]);
+ if ( mfn_eq(mfn, _mfn(0)) )
break;
- ASSERT(!(mfn_to_page(_mfn(mfn))->count_info & PGC_allocated));
+ ASSERT(!(mfn_to_page(mfn)->count_info & PGC_allocated));
free_xenheap_pages(mfn_to_virt(mfn), 0);
}
}
@@ -378,7 +379,7 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc)
{
case XEN_SYSCTL_TBUFOP_get_info:
tbc->evt_mask = tb_event_mask;
- tbc->buffer_mfn = t_info ? virt_to_mfn(t_info) : 0;
+ tbc->buffer_mfn = t_info ? mfn_x(virt_to_mfn(t_info)) : 0;
tbc->size = t_info_pages * PAGE_SIZE;
break;
case XEN_SYSCTL_TBUFOP_set_cpu_mask:
@@ -512,7 +513,7 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
uint16_t per_cpu_mfn_offset;
uint32_t per_cpu_mfn_nr;
uint32_t *mfn_list;
- uint32_t mfn;
+ mfn_t mfn;
unsigned char *this_page;
barrier(); /* must read buf->prod and buf->cons only once */
@@ -533,7 +534,7 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
per_cpu_mfn_nr = x >> PAGE_SHIFT;
per_cpu_mfn_offset = t_info->mfn_offset[smp_processor_id()];
mfn_list = (uint32_t *)t_info;
- mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr];
+ mfn = _mfn(mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr]);
this_page = mfn_to_virt(mfn);
if (per_cpu_mfn_nr + 1 >= opt_tbuf_size)
{
@@ -542,7 +543,7 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
}
else
{
- mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1];
+ mfn = _mfn(mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1]);
*next_page = mfn_to_virt(mfn);
}
return this_page;
@@ -19,10 +19,6 @@
#include <xsm/xsm.h>
#include <xen/hypercall.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32
@@ -219,7 +219,7 @@ void *__init acpi_os_alloc_memory(size_t sz)
void *ptr;
if (system_state == SYS_STATE_early_boot)
- return mfn_to_virt(mfn_x(alloc_boot_pages(PFN_UP(sz), 1)));
+ return mfn_to_virt(alloc_boot_pages(PFN_UP(sz), 1));
ptr = xmalloc_bytes(sz);
ASSERT(!ptr || is_xmalloc_memory(ptr));
@@ -285,16 +285,8 @@ static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
#define __va(x) (maddr_to_virt(x))
/* Convert between Xen-heap virtual addresses and machine frame numbers. */
-#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
-#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
-
-/*
- * We define non-underscored wrappers for above conversion functions.
- * These are overriden in various source files while underscored version
- * remain intact.
- */
-#define virt_to_mfn(va) __virt_to_mfn(va)
-#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
+#define virt_to_mfn(va) maddr_to_mfn(virt_to_maddr(va))
+#define mfn_to_virt(mfn) maddr_to_virt(mfn_to_maddr(mfn))
/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *virt_to_page(const void *v)
@@ -312,7 +304,7 @@ static inline struct page_info *virt_to_page(const void *v)
static inline void *page_to_virt(const struct page_info *pg)
{
- return mfn_to_virt(mfn_x(page_to_mfn(pg)));
+ return mfn_to_virt(page_to_mfn(pg));
}
struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
@@ -45,11 +45,11 @@ static inline int replace_grant_host_mapping(uint64_t addr, mfn_t frame,
VALID_M2P(gpfn_) ? _gfn(gpfn_) : INVALID_GFN; \
})
-#define gnttab_shared_mfn(t, i) _mfn(__virt_to_mfn((t)->shared_raw[i]))
+#define gnttab_shared_mfn(t, i) virt_to_mfn((t)->shared_raw[i])
#define gnttab_shared_gfn(d, t, i) mfn_to_gfn(d, gnttab_shared_mfn(t, i))
-#define gnttab_status_mfn(t, i) _mfn(__virt_to_mfn((t)->status[i]))
+#define gnttab_status_mfn(t, i) virt_to_mfn((t)->status[i])
#define gnttab_status_gfn(d, t, i) mfn_to_gfn(d, gnttab_status_mfn(t, i))
@@ -667,7 +667,7 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
{
unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
- return mfn <= (virt_to_mfn(eva - 1) + 1);
+ return mfn <= mfn_x(mfn_add(virt_to_mfn(eva - 1), 1));
}
int arch_acquire_resource(struct domain *d, unsigned int type,
@@ -236,8 +236,8 @@ void copy_page_sse2(void *, const void *);
#define __va(x) (maddr_to_virt(x))
/* Convert between Xen-heap virtual addresses and machine frame numbers. */
-#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
-#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
+#define virt_to_mfn(va) maddr_to_mfn(virt_to_maddr(va))
+#define mfn_to_virt(mfn) maddr_to_virt(mfn_to_maddr(mfn))
/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn) (frame_table + mfn_to_pdx(mfn))
@@ -260,8 +260,6 @@ void copy_page_sse2(void *, const void *);
* overridden in various source files while underscored versions remain intact.
*/
#define mfn_valid(mfn) __mfn_valid(mfn_x(mfn))
-#define virt_to_mfn(va) __virt_to_mfn(va)
-#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
#define virt_to_maddr(va) __virt_to_maddr((unsigned long)(va))
#define maddr_to_virt(ma) __maddr_to_virt((unsigned long)(ma))
#define maddr_to_page(ma) __maddr_to_page(ma)
@@ -53,14 +53,14 @@ static inline void *__map_domain_page_global(const struct page_info *pg)
#else /* !CONFIG_DOMAIN_PAGE */
-#define map_domain_page(mfn) __mfn_to_virt(mfn_x(mfn))
+#define map_domain_page(mfn) mfn_to_virt(mfn)
#define __map_domain_page(pg) page_to_virt(pg)
#define unmap_domain_page(va) ((void)(va))
-#define domain_page_map_to_mfn(va) _mfn(virt_to_mfn((unsigned long)(va)))
+#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
static inline void *map_domain_page_global(mfn_t mfn)
{
- return mfn_to_virt(mfn_x(mfn));
+ return mfn_to_virt(mfn);
}
static inline void *__map_domain_page_global(const struct page_info *pg)