@@ -193,11 +193,11 @@ static void __init acpi_map_other_tables(struct domain *d)
{
addr = acpi_gbl_root_table_list.tables[i].address;
size = acpi_gbl_root_table_list.tables[i].length;
- res = map_mmio_regions(d,
- gaddr_to_gfn(addr),
- PFN_UP(size),
- maddr_to_mfn(addr),
- p2m_mmio_direct_c);
+ res = map_regions(d,
+ gaddr_to_gfn(addr),
+ PFN_UP(size),
+ maddr_to_mfn(addr),
+ p2m_mmio_direct_c);
if ( res )
{
panic(XENLOG_ERR "Unable to map ACPI region 0x%"PRIx64
@@ -547,11 +547,11 @@ int __init prepare_acpi(struct domain *d, struct kernel_info *kinfo)
acpi_create_efi_mmap_table(d, &kinfo->mem, tbl_add);
/* Map the EFI and ACPI tables to Dom0 */
- rc = map_mmio_regions(d,
- gaddr_to_gfn(d->arch.efi_acpi_gpa),
- PFN_UP(d->arch.efi_acpi_len),
- virt_to_mfn(d->arch.efi_acpi_table),
- p2m_mmio_direct_c);
+ rc = map_regions(d,
+ gaddr_to_gfn(d->arch.efi_acpi_gpa),
+ PFN_UP(d->arch.efi_acpi_len),
+ virt_to_mfn(d->arch.efi_acpi_table),
+ p2m_mmio_direct_c);
if ( rc != 0 )
{
printk(XENLOG_ERR "Unable to map EFI/ACPI table 0x%"PRIx64
@@ -1171,11 +1171,11 @@ static int __init map_range_to_domain(const struct dt_device_node *dev,
if ( need_mapping )
{
- res = map_mmio_regions(d,
- gaddr_to_gfn(addr),
- PFN_UP(len),
- maddr_to_mfn(addr),
- mr_data->p2mt);
+ res = map_regions(d,
+ gaddr_to_gfn(addr),
+ PFN_UP(len),
+ maddr_to_mfn(addr),
+ mr_data->p2mt);
if ( res < 0 )
{
@@ -699,10 +699,10 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d)
d->domain_id, v2m_data->addr, v2m_data->size,
v2m_data->spi_start, v2m_data->nr_spis);
- ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
- PFN_UP(v2m_data->size),
- maddr_to_mfn(v2m_data->addr),
- p2m_mmio_direct_dev);
+ ret = map_regions(d, gaddr_to_gfn(v2m_data->addr),
+ PFN_UP(v2m_data->size),
+ maddr_to_mfn(v2m_data->addr),
+ p2m_mmio_direct_dev);
if ( ret )
{
printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
@@ -1331,19 +1331,19 @@ static inline int p2m_remove_mapping(struct domain *d,
return rc;
}
-int map_mmio_regions(struct domain *d,
- gfn_t gfn,
- unsigned long nr,
- mfn_t mfn,
- p2m_type_t p2mt)
+int map_regions(struct domain *d,
+ gfn_t gfn,
+ unsigned long nr,
+ mfn_t mfn,
+ p2m_type_t p2mt)
{
return p2m_insert_mapping(d, gfn, nr, mfn, p2mt);
}
-int unmap_mmio_regions(struct domain *d,
- gfn_t start_gfn,
- unsigned long nr,
- mfn_t mfn)
+int unmap_regions(struct domain *d,
+ gfn_t start_gfn,
+ unsigned long nr,
+ mfn_t mfn)
{
return p2m_remove_mapping(d, start_gfn, nr, mfn);
}
@@ -82,14 +82,14 @@ static int exynos5_init_time(void)
static int exynos5250_specific_mapping(struct domain *d)
{
/* Map the chip ID */
- map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_CHIPID), 1,
- maddr_to_mfn(EXYNOS5_PA_CHIPID),
- p2m_mmio_direct_dev);
+ map_regions(d, gaddr_to_gfn(EXYNOS5_PA_CHIPID), 1,
+ maddr_to_mfn(EXYNOS5_PA_CHIPID),
+ p2m_mmio_direct_dev);
/* Map the PWM region */
- map_mmio_regions(d, gaddr_to_gfn(EXYNOS5_PA_TIMER), 2,
- maddr_to_mfn(EXYNOS5_PA_TIMER),
- p2m_mmio_direct_dev);
+ map_regions(d, gaddr_to_gfn(EXYNOS5_PA_TIMER), 2,
+ maddr_to_mfn(EXYNOS5_PA_TIMER),
+ p2m_mmio_direct_dev);
return 0;
}
@@ -98,24 +98,24 @@ static int omap5_init_time(void)
static int omap5_specific_mapping(struct domain *d)
{
/* Map the PRM module */
- map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRM_BASE), 2,
- maddr_to_mfn(OMAP5_PRM_BASE),
- p2m_mmio_direct_dev);
+ map_regions(d, gaddr_to_gfn(OMAP5_PRM_BASE), 2,
+ maddr_to_mfn(OMAP5_PRM_BASE),
+ p2m_mmio_direct_dev);
/* Map the PRM_MPU */
- map_mmio_regions(d, gaddr_to_gfn(OMAP5_PRCM_MPU_BASE), 1,
- maddr_to_mfn(OMAP5_PRCM_MPU_BASE),
- p2m_mmio_direct_dev);
+ map_regions(d, gaddr_to_gfn(OMAP5_PRCM_MPU_BASE), 1,
+ maddr_to_mfn(OMAP5_PRCM_MPU_BASE),
+ p2m_mmio_direct_dev);
/* Map the Wakeup Gen */
- map_mmio_regions(d, gaddr_to_gfn(OMAP5_WKUPGEN_BASE), 1,
- maddr_to_mfn(OMAP5_WKUPGEN_BASE),
- p2m_mmio_direct_dev);
+ map_regions(d, gaddr_to_gfn(OMAP5_WKUPGEN_BASE), 1,
+ maddr_to_mfn(OMAP5_WKUPGEN_BASE),
+ p2m_mmio_direct_dev);
/* Map the on-chip SRAM */
- map_mmio_regions(d, gaddr_to_gfn(OMAP5_SRAM_PA), 32,
- maddr_to_mfn(OMAP5_SRAM_PA),
- p2m_mmio_direct_dev);
+ map_regions(d, gaddr_to_gfn(OMAP5_SRAM_PA), 32,
+ maddr_to_mfn(OMAP5_SRAM_PA),
+ p2m_mmio_direct_dev);
return 0;
}
@@ -1887,7 +1887,7 @@ static bool try_map_mmio(gfn_t gfn)
if ( !iomem_access_permitted(d, mfn_x(mfn), mfn_x(mfn) + 1) )
return false;
- return !map_mmio_regions(d, gfn, 1, mfn, p2m_mmio_direct_c);
+ return !map_regions(d, gfn, 1, mfn, p2m_mmio_direct_c);
}
static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
@@ -690,8 +690,8 @@ static int vgic_v2_domain_init(struct domain *d)
* Map the gic virtual cpu interface in the gic cpu interface
* region of the guest.
*/
- ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
- maddr_to_mfn(vbase), p2m_mmio_direct_dev);
+ ret = map_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
+ maddr_to_mfn(vbase), p2m_mmio_direct_dev);
if ( ret )
return ret;
@@ -308,8 +308,8 @@ int vgic_v2_map_resources(struct domain *d)
* Map the gic virtual cpu interface in the gic cpu interface
* region of the guest.
*/
- ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
- maddr_to_mfn(vbase), p2m_mmio_direct_dev);
+ ret = map_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
+ maddr_to_mfn(vbase), p2m_mmio_direct_dev);
if ( ret )
{
gdprintk(XENLOG_ERR, "Unable to remap VGIC CPU to VCPU\n");
@@ -80,10 +80,10 @@ static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
for ( ; ; )
{
if ( map )
- rc = map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn),
- p2m_mmio_direct);
+ rc = map_regions(d, _gfn(pfn), nr_pages, _mfn(pfn),
+ p2m_mmio_direct);
else
- rc = unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
+ rc = unmap_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
if ( rc == 0 )
break;
if ( rc < 0 )
@@ -2261,11 +2261,11 @@ static unsigned int mmio_order(const struct domain *d,
#define MAP_MMIO_MAX_ITER 64 /* pretty arbitrary */
-int map_mmio_regions(struct domain *d,
- gfn_t start_gfn,
- unsigned long nr,
- mfn_t mfn,
- p2m_type_t p2mt)
+int map_regions(struct domain *d,
+ gfn_t start_gfn,
+ unsigned long nr,
+ mfn_t mfn,
+ p2m_type_t p2mt)
{
int ret = 0;
unsigned long i;
@@ -2298,10 +2298,10 @@ int map_mmio_regions(struct domain *d,
return i == nr ? 0 : i ?: ret;
}
-int unmap_mmio_regions(struct domain *d,
- gfn_t start_gfn,
- unsigned long nr,
- mfn_t mfn)
+int unmap_regions(struct domain *d,
+ gfn_t start_gfn,
+ unsigned long nr,
+ mfn_t mfn)
{
int ret = 0;
unsigned long i;
@@ -961,7 +961,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
"memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
d->domain_id, gfn, mfn, nr_mfns);
- ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn), p2mt);
+ ret = map_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn), p2mt);
if ( ret < 0 )
printk(XENLOG_G_WARNING
"memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n",
@@ -973,7 +973,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
"memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
d->domain_id, gfn, mfn, nr_mfns);
- ret = unmap_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
+ ret = unmap_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
if ( ret < 0 && is_hardware_domain(current->domain) )
printk(XENLOG_ERR
"memory_map: error %ld removing dom%d access to [%lx,%lx]\n",
@@ -46,15 +46,15 @@ static int map_range(unsigned long s, unsigned long e, void *data,
/*
* ARM TODOs:
* - On ARM whether the memory is prefetchable or not should be passed
- * to map_mmio_regions in order to decide which memory attributes
+ * to map_regions in order to decide which memory attributes
* should be used.
*
- * - {un}map_mmio_regions doesn't support preemption.
+ * - {un}map_regions doesn't support preemption.
*/
- rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s),
+ rc = map->map ? map_regions(map->d, _gfn(s), size, _mfn(s),
p2m_mmio_direct)
- : unmap_mmio_regions(map->d, _gfn(s), size, _mfn(s));
+ : unmap_regions(map->d, _gfn(s), size, _mfn(s));
if ( rc == 0 )
{
*c += size;
@@ -8,18 +8,20 @@ int __must_check
guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
unsigned int page_order);
-/* Map MMIO regions in the p2m: start_gfn and nr describe the range in
- * * the guest physical address space to map, starting from the machine
- * * frame number mfn. */
-int map_mmio_regions(struct domain *d,
- gfn_t start_gfn,
- unsigned long nr,
- mfn_t mfn,
- p2m_type_t p2mt);
-int unmap_mmio_regions(struct domain *d,
- gfn_t start_gfn,
- unsigned long nr,
- mfn_t mfn);
+/*
+ * Map memory regions in the p2m: start_gfn and nr describe the range in
+ * the guest physical address space to map, starting from the machine
+ * frame number mfn, with the memory attributes requested via p2mt.
+ */
+int map_regions(struct domain *d,
+ gfn_t start_gfn,
+ unsigned long nr,
+ mfn_t mfn,
+ p2m_type_t p2mt);
+int unmap_regions(struct domain *d,
+ gfn_t start_gfn,
+ unsigned long nr,
+ mfn_t mfn);
/*
* Populate-on-Demand
Now that map_mmio_regions takes a p2mt parameter, there is no need to keep "mmio" in the name. The p2mt parameter does a better job at expressing what the mapping is about. Let's save the environment 5 characters at a time. Also fix the comment on top of map_mmio_regions. Signed-off-by: Stefano Stabellini <stefanos@xilinx.com> CC: JBeulich@suse.com CC: andrew.cooper3@citrix.com --- Changes in v2: - new patch --- xen/arch/arm/acpi/domain_build.c | 20 ++++++++++---------- xen/arch/arm/domain_build.c | 10 +++++----- xen/arch/arm/gic-v2.c | 8 ++++---- xen/arch/arm/p2m.c | 18 +++++++++--------- xen/arch/arm/platforms/exynos5.c | 12 ++++++------ xen/arch/arm/platforms/omap5.c | 24 ++++++++++++------------ xen/arch/arm/traps.c | 2 +- xen/arch/arm/vgic-v2.c | 4 ++-- xen/arch/arm/vgic/vgic-v2.c | 4 ++-- xen/arch/x86/hvm/dom0_build.c | 6 +++--- xen/arch/x86/mm/p2m.c | 18 +++++++++--------- xen/common/domctl.c | 4 ++-- xen/drivers/vpci/header.c | 8 ++++---- xen/include/xen/p2m-common.h | 26 ++++++++++++++------------ 14 files changed, 83 insertions(+), 81 deletions(-)