--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -300,6 +300,101 @@ static void __init xen_acpi_guest_init(void)
#endif
}
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int arch_xen_unpopulated_init_resource(struct resource *res)
+{
+ struct xen_get_unallocated_space xgus;
+ struct xen_unallocated_region *regions;
+ xen_pfn_t min_gpfn = -1, max_gpfn = 0;
+ unsigned int i, nr_regions;
+ struct resource *tmp_res;
+ int rc;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ /* Query the hypervisor to find as many unused memory regions as possible */
+ nr_regions = XEN_MAX_UNALLOCATED_REGIONS;
+ regions = kcalloc(nr_regions, sizeof(regions[0]), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ xgus.domid = DOMID_SELF;
+ xgus.nr_regions = nr_regions;
+ set_xen_guest_handle(xgus.buffer, regions);
+
+ rc = HYPERVISOR_memory_op(XENMEM_get_unallocated_space, &xgus);
+ if (rc) {
+ pr_warn("XENMEM_get_unallocated_space failed, err=%d\n", rc);
+ goto err;
+ }
+
+ if (WARN_ON(xgus.nr_regions == 0)) {
+ rc = -EINVAL;
+ goto err;
+ }
+ nr_regions = xgus.nr_regions;
+
+ /*
+ * Create a resource spanning the memory regions provided by the
+ * hypervisor; it serves as unallocated address space for Xen scratch pages.
+ */
+ for (i = 0; i < nr_regions; i++) {
+ if (max_gpfn < regions[i].start_gpfn + regions[i].nr_gpfns)
+ max_gpfn = regions[i].start_gpfn + regions[i].nr_gpfns;
+ if (min_gpfn > regions[i].start_gpfn)
+ min_gpfn = regions[i].start_gpfn;
+ }
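+ /* min_gpfn started at -1, i.e. the maximum xen_pfn_t value */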
+ res->start = min_gpfn << PAGE_SHIFT;
+ res->end = (max_gpfn << PAGE_SHIFT) - 1;
+
+ /*
+ * As the memory regions are not necessarily contiguous, calculate and
+ * reserve the possible holes between them. The rest of that address
+ * space will be available for allocation.
+ */
+ for (i = 1; i < nr_regions; i++) {
+ resource_size_t start, end;
+
+ start = (regions[i - 1].start_gpfn << PAGE_SHIFT) +
+ regions[i - 1].nr_gpfns * PAGE_SIZE;
+ end = regions[i].start_gpfn << PAGE_SHIFT;
+
+ if (WARN_ON(start > end)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (start == end)
+ continue;
+
+ tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+ if (!tmp_res) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ tmp_res->name = "Unavailable space";
+ tmp_res->start = start;
+ tmp_res->end = end - 1;
+
+ rc = request_resource(res, tmp_res);
+ if (rc) {
+ pr_err("Cannot insert IOMEM resource [%llx - %llx]\n",
+ tmp_res->start, tmp_res->end);
+ kfree(tmp_res);
+ goto err;
+ }
+ }
+
+err:
+ kfree(regions);
+
+ return rc;
+}
+#endif
+
static void __init xen_dt_guest_init(void)
{
struct device_node *xen_node;
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -296,7 +296,7 @@ config XEN_FRONT_PGDIR_SHBUF
config XEN_UNPOPULATED_ALLOC
bool "Use unpopulated memory ranges for guest mappings"
- depends on X86 && ZONE_DEVICE
+ depends on ZONE_DEVICE
default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
help
Use unpopulated memory ranges in order to create mappings for guest
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -15,13 +15,40 @@ static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;
+static struct resource *target_resource;
+static struct resource xen_resource = {
+ .name = "Xen unallocated space",
+};
+
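+/* Weak default: returning -ENOSYS makes fill_list() fall back to iomem_resource */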
+int __weak arch_xen_unpopulated_init_resource(struct resource *res)
+{
+ return -ENOSYS;
+}
+
static int fill_list(unsigned int nr_pages)
{
struct dev_pagemap *pgmap;
- struct resource *res;
+ struct resource *res, *tmp_res = NULL;
void *vaddr;
unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
- int ret = -ENOMEM;
+ int ret;
+
+ /*
+ * Try to use the Xen resource first and fall back to the default
+ * resource if the arch doesn't offer one.
+ */
+ if (!target_resource) {
+ ret = arch_xen_unpopulated_init_resource(&xen_resource);
+ if (!ret) {
+ target_resource = &xen_resource;
+ } else if (ret == -ENOSYS) {
+ target_resource = &iomem_resource;
+ } else {
+ pr_err("Cannot initialize Xen resource\n");
+ return ret;
+ }
+ }
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -30,7 +57,7 @@ static int fill_list(unsigned int nr_pages)
res->name = "Xen scratch";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- ret = allocate_resource(&iomem_resource, res,
+ ret = allocate_resource(target_resource, res,
alloc_pages * PAGE_SIZE, 0, -1,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
@@ -38,6 +65,32 @@ static int fill_list(unsigned int nr_pages)
goto err_resource;
}
+ /*
+ * Reserve the region previously allocated from the Xen resource so
+ * nobody else reuses it: a copy goes into iomem_resource because
+ * "res" itself is already linked into the Xen resource tree.
+ */
+ if (target_resource != &iomem_resource) {
+ tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+ if (!tmp_res) {
+ ret = -ENOMEM;
+ goto err_insert;
+ }
+
+ tmp_res->name = res->name;
+ tmp_res->start = res->start;
+ tmp_res->end = res->end;
+ tmp_res->flags = res->flags;
+
+ ret = insert_resource(&iomem_resource, tmp_res);
+ if (ret < 0) {
+ pr_err("Cannot insert IOMEM resource [%llx - %llx]\n",
+ tmp_res->start, tmp_res->end);
+ kfree(tmp_res);
+ goto err_insert;
+ }
+ }
+
pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
if (!pgmap) {
ret = -ENOMEM;
@@ -96,6 +149,11 @@ static int fill_list(unsigned int nr_pages)
err_memremap:
kfree(pgmap);
err_pgmap:
+ if (tmp_res) {
+ release_resource(tmp_res);
+ kfree(tmp_res);
+ }
+err_insert:
release_resource(res);
err_resource:
kfree(res);
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -325,4 +325,38 @@ struct xen_mem_acquire_resource {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_mem_acquire_resource);
+/*
+ * Get the unallocated space (regions of the guest physical address space
+ * which are unused) that can then be used to create grant/foreign mappings.
+ */
+#define XENMEM_get_unallocated_space 29
+struct xen_unallocated_region {
+ xen_pfn_t start_gpfn;
+ xen_ulong_t nr_gpfns;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_unallocated_region);
+
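+/* Maximum number of regions the kernel will request in a single call */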
+#define XEN_MAX_UNALLOCATED_REGIONS 32
+
+struct xen_get_unallocated_space {
+ /* IN - Which domain to provide unallocated space for */
+ domid_t domid;
+
+ /*
+ * IN/OUT - As an IN parameter, the number of memory regions that
+ * can be written to the buffer (i.e. the maximum size of
+ * the array); as an OUT parameter, the number of memory
+ * regions that have actually been written to the buffer.
+ */
+ unsigned int nr_regions;
+
+ /*
+ * OUT - An array of memory regions; the regions must be returned in
+ * ascending order and must not overlap.
+ */
+ GUEST_HANDLE(xen_unallocated_region) buffer;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_get_unallocated_space);
+
#endif /* __XEN_PUBLIC_MEMORY_H__ */
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -55,6 +55,8 @@ extern u64 xen_saved_max_mem_size;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+struct resource;
+int arch_xen_unpopulated_init_resource(struct resource *res);
#else
#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
#define xen_free_unpopulated_pages free_xenballooned_pages
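
For illustration, a minimal usage sketch (not part of the patch; the driver
function name and the fixed-size page array are hypothetical): a backend that
needs scratch space for foreign mappings allocates unpopulated pages, which
with this series come from the arch-provided resource when one is available,
and frees them when done.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <xen/xen.h>

    /* Hypothetical caller of the API declared in include/xen/xen.h above */
    static int example_map_foreign(unsigned int nr_pages)
    {
            struct page *pages[8];
            int ret;

            if (nr_pages > ARRAY_SIZE(pages))
                    return -EINVAL;

            /* Backed by "Xen unallocated space" when the arch provides it */
            ret = xen_alloc_unpopulated_pages(nr_pages, pages);
            if (ret)
                    return ret;

            /* ... set up grant/foreign mappings over 'pages' here ... */

            xen_free_unpopulated_pages(nr_pages, pages);
            return 0;
    }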