@@ -75,6 +75,7 @@ choice
config MMU
bool "MMU"
+ select ARCH_PAGING_MEMPOOL
select HAS_LLC_COLORING if !NUMA && ARM_64
select HAS_PMAP
select HAS_VMAP
@@ -673,21 +673,6 @@ static int __init prepare_dtb_domU(struct domain *d, struct kernel_info *kinfo)
return -EINVAL;
}
-static unsigned long __init domain_p2m_pages(unsigned long maxmem_kb,
- unsigned int smp_cpus)
-{
- /*
- * Keep in sync with libxl__get_required_paging_memory().
- * 256 pages (1MB) per vcpu, plus 1 page per MiB of RAM for the P2M map,
- * plus 128 pages to cover extended regions.
- */
- unsigned long memkb = 4 * (256 * smp_cpus + (maxmem_kb / 1024) + 128);
-
- BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
-
- return DIV_ROUND_UP(memkb, 1024) << (20 - PAGE_SHIFT);
-}
-
static int __init alloc_xenstore_evtchn(struct domain *d)
{
evtchn_alloc_unbound_t alloc;
@@ -841,6 +826,49 @@ static void __init domain_vcpu_affinity(struct domain *d,
}
}
+#ifdef CONFIG_ARCH_PAGING_MEMPOOL
+static unsigned long __init domain_p2m_pages(unsigned long maxmem_kb,
+ unsigned int smp_cpus)
+{
+ /*
+ * Keep in sync with libxl__get_required_paging_memory().
+ * 256 pages (1MB) per vcpu, plus 1 page per MiB of RAM for the P2M map,
+ * plus 128 pages to cover extended regions.
+ */
+ unsigned long memkb = 4 * (256 * smp_cpus + (maxmem_kb / 1024) + 128);
+
+ BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
+
+ return DIV_ROUND_UP(memkb, 1024) << (20 - PAGE_SHIFT);
+}
+
+static int __init domain_p2m_set_allocation(struct domain *d, uint64_t mem,
+ const struct dt_device_node *node)
+{
+ unsigned long p2m_pages;
+ uint32_t p2m_mem_mb;
+ int rc;
+
+ rc = dt_property_read_u32(node, "xen,domain-p2m-mem-mb", &p2m_mem_mb);
+ /* If xen,domain-p2m-mem-mb is not specified, use the default value. */
+ p2m_pages = rc ?
+ p2m_mem_mb << (20 - PAGE_SHIFT) :
+ domain_p2m_pages(mem, d->max_vcpus);
+
+ spin_lock(&d->arch.paging.lock);
+ rc = p2m_set_allocation(d, p2m_pages, NULL);
+ spin_unlock(&d->arch.paging.lock);
+
+ return rc;
+}
+#else /* !CONFIG_ARCH_PAGING_MEMPOOL */
+static inline int domain_p2m_set_allocation(struct domain *d, uint64_t mem,
+ const struct dt_device_node *node)
+{
+ return 0; /* nothing to size when the arch has no paging mempool */
+}
+#endif /* CONFIG_ARCH_PAGING_MEMPOOL */
+
static int __init construct_domU(struct domain *d,
const struct dt_device_node *node)
{
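Before the refactor continues into construct_domU(), the sizing heuristic is worth sanity-checking with concrete numbers. The following standalone sketch (illustration only, not part of the patch) mirrors the formula above for a hypothetical 2-vCPU domU with 1 GiB of RAM, assuming the SZ_4K pages the BUILD_BUG_ON() enforces:

#include <stdio.h>

#define PAGE_SHIFT 12                               /* 4 KiB pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Local copy of the heuristic, matching the hunk above. */
static unsigned long domain_p2m_pages(unsigned long maxmem_kb,
                                      unsigned int smp_cpus)
{
    unsigned long memkb = 4 * (256 * smp_cpus + (maxmem_kb / 1024) + 128);

    return DIV_ROUND_UP(memkb, 1024) << (20 - PAGE_SHIFT);
}

int main(void)
{
    /* 2 vCPUs, 1 GiB: 4 * (512 + 1024 + 128) = 6656 KiB -> 7 MiB. */
    printf("%lu pages\n", domain_p2m_pages(1048576, 2)); /* prints 1792 */
    return 0;
}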
@@ -848,8 +876,6 @@ static int __init construct_domU(struct domain *d,
const char *dom0less_enhanced;
int rc;
u64 mem;
- u32 p2m_mem_mb;
- unsigned long p2m_pages;
rc = dt_property_read_u64(node, "memory", &mem);
if ( !rc )
@@ -859,15 +885,7 @@ static int __init construct_domU(struct domain *d,
}
kinfo.unassigned_mem = (paddr_t)mem * SZ_1K;
- rc = dt_property_read_u32(node, "xen,domain-p2m-mem-mb", &p2m_mem_mb);
- /* If xen,domain-p2m-mem-mb is not specified, use the default value. */
- p2m_pages = rc ?
- p2m_mem_mb << (20 - PAGE_SHIFT) :
- domain_p2m_pages(mem, d->max_vcpus);
-
- spin_lock(&d->arch.paging.lock);
- rc = p2m_set_allocation(d, p2m_pages, NULL);
- spin_unlock(&d->arch.paging.lock);
+ rc = domain_p2m_set_allocation(d, mem, node);
if ( rc != 0 )
return rc;
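The other branch of domain_p2m_set_allocation() takes effect when a dom0less node carries an explicit xen,domain-p2m-mem-mb property: the heuristic is skipped and the value is merely converted from MiB to pages. A small sketch of that conversion (hypothetical property value of 8; illustration only):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12 /* 4 KiB pages, as enforced by the BUILD_BUG_ON() */

int main(void)
{
    uint32_t p2m_mem_mb = 8; /* as if the node set xen,domain-p2m-mem-mb = <8> */
    unsigned long p2m_pages = (unsigned long)p2m_mem_mb << (20 - PAGE_SHIFT);

    assert(p2m_pages == 2048); /* 8 MiB / 4 KiB */
    return 0;
}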
@@ -75,7 +75,9 @@ struct arch_domain
struct hvm_domain hvm;
+#ifdef CONFIG_ARCH_PAGING_MEMPOOL
struct paging_domain paging;
+#endif
struct vmmio vmmio;
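Compiling the field out, rather than leaving it dormant, shrinks struct arch_domain on !ARCH_PAGING_MEMPOOL builds and turns any leftover d->arch.paging user into a build error. A stand-in sketch (not Xen's real types) that can be compiled with and without -DCONFIG_ARCH_PAGING_MEMPOOL to observe both effects:

#include <stdio.h>

/* Stand-in types for illustration only. */
struct paging_domain { unsigned long total_pages; };

struct arch_domain {
#ifdef CONFIG_ARCH_PAGING_MEMPOOL
    struct paging_domain paging;
#endif
    int other_state;
};

int main(void)
{
    printf("sizeof(struct arch_domain) = %zu\n", sizeof(struct arch_domain));
    /* Without -DCONFIG_ARCH_PAGING_MEMPOOL, any .paging access is a build error. */
    return 0;
}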
@@ -335,11 +335,6 @@ void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
/* p2m.c */
-int arch_set_paging_mempool_size(struct domain *d, uint64_t size)
-{
- BUG_ON("unimplemented");
-}
-
int unmap_mmio_regions(struct domain *d,
gfn_t start_gfn,
unsigned long nr,
@@ -362,12 +357,6 @@ int set_foreign_p2m_entry(struct domain *d, const struct domain *fd,
BUG_ON("unimplemented");
}
-/* Return the size of the pool, in bytes. */
-int arch_get_paging_mempool_size(struct domain *d, uint64_t *size)
-{
- BUG_ON("unimplemented");
-}
-
int guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
unsigned int page_order)
{
@@ -9,6 +9,7 @@ config X86
select ACPI_NUMA
select ALTERNATIVE_CALL
select ARCH_MAP_DOMAIN_PAGE
+ select ARCH_PAGING_MEMPOOL
select ARCH_SUPPORTS_INT128
imply CORE_PARKING
select FUNCTION_ALIGNMENT_16B
@@ -44,6 +44,9 @@ config ALTERNATIVE_CALL
config ARCH_MAP_DOMAIN_PAGE
bool
+config ARCH_PAGING_MEMPOOL
+ bool
+
config GENERIC_BUG_FRAME
bool
@@ -2,6 +2,7 @@
#ifndef __XEN_DOMAIN_H__
#define __XEN_DOMAIN_H__
+#include <xen/errno.h>
#include <xen/numa.h>
#include <xen/types.h>
@@ -114,9 +115,25 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c);
int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
+#ifdef CONFIG_ARCH_PAGING_MEMPOOL
+
int arch_get_paging_mempool_size(struct domain *d, uint64_t *size /* bytes */);
int arch_set_paging_mempool_size(struct domain *d, uint64_t size /* bytes */);
+#else /* !CONFIG_ARCH_PAGING_MEMPOOL */
+
+static inline int arch_get_paging_mempool_size(struct domain *d, uint64_t *size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int arch_set_paging_mempool_size(struct domain *d, uint64_t size)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_ARCH_PAGING_MEMPOOL */
+
bool update_runstate_area(struct vcpu *v);
int domain_relinquish_resources(struct domain *d);
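The inline fallbacks mean common code needs no per-architecture #ifdef: on a mempool-less arch, the paging-mempool domctl paths now fail cleanly with -EOPNOTSUPP where the deleted stubs would have crashed the host via BUG_ON(). A runnable illustration of the fallback's observable behavior (stand-in types; assumes a libc whose <errno.h> provides EOPNOTSUPP):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct domain; /* opaque stand-in */

/* Mirror of the !ARCH_PAGING_MEMPOOL fallback above (illustration only). */
static inline int arch_get_paging_mempool_size(struct domain *d, uint64_t *size)
{
    return -EOPNOTSUPP;
}

int main(void)
{
    uint64_t size;
    int rc = arch_get_paging_mempool_size(NULL, &size);

    /* e.g. what a XEN_DOMCTL_get_paging_mempool_size caller would observe */
    if ( rc == -EOPNOTSUPP )
        printf("paging mempool not supported on this architecture\n");

    return 0;
}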