@@ -18,7 +18,6 @@ config X86
select HAS_KEXEC
select MEM_ACCESS_ALWAYS_ON
select HAS_MEM_PAGING
- select HAS_MEM_SHARING
select HAS_NS16550
select HAS_PASSTHROUGH
select HAS_PCI
@@ -199,6 +198,11 @@ config PV_SHIM_EXCLUSIVE
firmware, and will not function correctly in other scenarios.
If unsure, say N.
+
+config MEM_SHARING
+ bool "Xen memory sharing support" if EXPERT = "y"
+ depends on HVM
+
endmenu
source "common/Kconfig"
@@ -2095,6 +2095,7 @@ int domain_relinquish_resources(struct domain *d)
d->arch.auto_unmask = 0;
}
+#ifdef CONFIG_MEM_SHARING
PROGRESS(shared):
if ( is_hvm_domain(d) )
@@ -2105,6 +2106,7 @@ int domain_relinquish_resources(struct domain *d)
if ( ret )
return ret;
}
+#endif
spin_lock(&d->page_alloc_lock);
page_list_splice(&d->arch.relmem_list, &d->page_list);
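These two hunks compile out the whole PROGRESS(shared) stage of domain teardown, which matches the header change below: relinquish_shared_pages() is only declared when sharing is built in and gets no stub, so the stage that uses it cannot be left in place. A hedged sketch of the restartable-teardown pattern being guarded here, with illustrative names (rel_stage, REL_*, the simplified PROGRESS macro) rather than Xen's:

    /*
     * Hedged sketch of the restartable teardown pattern guarded above.
     * Names are illustrative, not Xen's: each stage records how far it
     * got so the operation can be resumed, and an entire stage can be
     * compiled out by wrapping its PROGRESS() block in #ifdef.
     */
    #include <stdio.h>

    /* #define CONFIG_MEM_SHARING 1   -- include the "shared" stage */

    enum rel_stage { REL_start, REL_paging, REL_shared, REL_done };

    #define PROGRESS(x)        \
            *stage = REL_##x;  \
        case REL_##x

    static int relinquish(enum rel_stage *stage)
    {
        switch ( *stage )
        {
        case REL_start:

        PROGRESS(paging):
            /* ... release paging state; may bail out to be restarted ... */

    #ifdef CONFIG_MEM_SHARING
        PROGRESS(shared):
            /* ... unshare remaining pages; may bail out to be restarted ... */
    #endif

        PROGRESS(done):
            break;

        default:
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        enum rel_stage s = REL_start;
        printf("relinquish() returned %d\n", relinquish(&s));
        return 0;
    }
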
@@ -1236,9 +1236,11 @@ long arch_do_domctl(
break;
}
+#ifdef CONFIG_MEM_SHARING
case XEN_DOMCTL_mem_sharing_op:
ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
break;
+#endif
#if P2M_AUDIT && defined(CONFIG_HVM)
case XEN_DOMCTL_audit_p2m:
@@ -6,7 +6,7 @@ obj-$(CONFIG_HVM) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
obj-$(CONFIG_SHADOW_PAGING) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
obj-$(CONFIG_MEM_ACCESS) += mem_access.o
obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
obj-y += p2m.o p2m-pt.o
obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
obj-y += paging.o
@@ -152,8 +152,10 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case XENMEM_paging_op:
return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#ifdef CONFIG_MEM_SHARING
case XENMEM_sharing_op:
return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
default:
rc = -ENOSYS;
@@ -993,8 +993,10 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case XENMEM_paging_op:
return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#ifdef CONFIG_MEM_SHARING
case XENMEM_sharing_op:
return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
default:
rc = -ENOSYS;
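For the domctl and memory-op dispatchers above, guarding the case label is all that is needed: with sharing compiled out, an incoming XENMEM_sharing_op or XEN_DOMCTL_mem_sharing_op simply falls through to the default arm, which already reports the operation as unimplemented. A small sketch of that dispatch shape, with made-up op numbers and a hand-defined error value:

    /*
     * Sketch of the dispatch shape used above: a guarded case label
     * falls back to the default arm when the feature is compiled out.
     * Op numbers and the error constant are made up for illustration.
     */
    #include <stdio.h>

    #define ENOSYS 38              /* stand-in, not pulled from errno.h */

    enum { OP_PAGING = 1, OP_SHARING = 2 };

    static long do_memop(unsigned int cmd)
    {
        switch ( cmd )
        {
        case OP_PAGING:
            return 0;              /* handled */

    #ifdef CONFIG_MEM_SHARING
        case OP_SHARING:
            return 0;              /* handled only when built in */
    #endif

        default:
            return -ENOSYS;        /* unknown or compiled-out operation */
        }
    }

    int main(void)
    {
        printf("OP_SHARING -> %ld\n", do_memop(OP_SHARING));
        return 0;
    }
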
@@ -48,9 +48,6 @@ config MEM_ACCESS
config HAS_MEM_PAGING
bool
-config HAS_MEM_SHARING
- bool
-
config HAS_PDX
bool
@@ -74,7 +74,7 @@ integer_param("hardware_dom", hardware_domid);
/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
struct domain *__read_mostly dom_xen;
struct domain *__read_mostly dom_io;
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
struct domain *__read_mostly dom_cow;
#endif
@@ -549,7 +549,7 @@ void __init setup_system_domains(void)
if ( IS_ERR(dom_io) )
panic("Failed to create d[IO]: %ld\n", PTR_ERR(dom_io));
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
/*
* Initialise our COW domain.
* This domain owns sharable pages.
@@ -966,7 +966,7 @@ static void complete_domain_destroy(struct rcu_head *head)
xfree(d->vm_event_paging);
#endif
xfree(d->vm_event_monitor);
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
xfree(d->vm_event_share);
#endif
@@ -3798,7 +3798,7 @@ void grant_table_init_vcpu(struct vcpu *v)
v->maptrack_tail = MAPTRACK_TAIL;
}
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
gfn_t *gfn, uint16_t *status)
{
@@ -1676,7 +1676,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
return -EAGAIN;
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
{
if ( page )
@@ -531,7 +531,7 @@ static void monitor_notification(struct vcpu *v, unsigned int port)
vm_event_resume(v->domain, v->domain->vm_event_monitor);
}
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
/* Registered with Xen-bound event channel for incoming notifications. */
static void mem_sharing_notification(struct vcpu *v, unsigned int port)
{
@@ -561,7 +561,7 @@ void vm_event_cleanup(struct domain *d)
destroy_waitqueue_head(&d->vm_event_monitor->wq);
(void)vm_event_disable(d, &d->vm_event_monitor);
}
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
if ( vm_event_check_ring(d->vm_event_share) )
{
destroy_waitqueue_head(&d->vm_event_share->wq);
@@ -703,7 +703,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
}
break;
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
case XEN_DOMCTL_VM_EVENT_OP_SHARING:
{
rc = -EINVAL;
@@ -24,6 +24,8 @@
#include <public/domctl.h>
#include <public/memory.h>
+#ifdef CONFIG_MEM_SHARING
+
/* Auditing of memory sharing code? */
#ifndef NDEBUG
#define MEM_SHARING_AUDIT 1
@@ -99,4 +101,30 @@ int mem_sharing_domctl(struct domain *d,
*/
int relinquish_shared_pages(struct domain *d);
+#else
+
+static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
+{
+ return 0;
+}
+static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
+{
+ return 0;
+}
+static inline int mem_sharing_unshare_page(struct domain *d,
+ unsigned long gfn,
+ uint16_t flags)
+{
+ ASSERT_UNREACHABLE();
+ return -EOPNOTSUPP;
+}
+static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
+ bool allow_sleep)
+{
+ ASSERT_UNREACHABLE();
+ return -EOPNOTSUPP;
+}
+
+#endif
+
#endif /* __MEM_SHARING_H__ */
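The interesting part of the header change is the new !CONFIG_MEM_SHARING branch: instead of sprinkling #ifdef around every caller, the header supplies inline stubs. The counters read as zero, and operations that should be unreachable without sharing assert and fail with -EOPNOTSUPP, so callers can stay unconditional and the compiler discards the dead paths. A self-contained sketch of that stub-header idiom, reduced to the compiled-out side (ASSERT_UNREACHABLE, EOPNOTSUPP and the domain type are simplified stand-ins, not the Xen definitions):

    /*
     * Sketch of the stub-header idiom used above, reduced to the
     * !CONFIG_MEM_SHARING side: inline stubs mean call sites need no
     * #ifdef of their own.  The macros and types are stand-ins.
     */
    #include <assert.h>
    #include <stdio.h>

    #define EOPNOTSUPP 95
    #define ASSERT_UNREACHABLE() assert(!"unreachable")

    struct domain;                       /* opaque for this sketch */

    static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
    {
        return 0;                        /* nothing can be shared */
    }

    static inline int mem_sharing_unshare_page(struct domain *d,
                                               unsigned long gfn,
                                               unsigned int flags)
    {
        (void)d; (void)gfn; (void)flags;
        ASSERT_UNREACHABLE();            /* callers should never get here */
        return -EOPNOTSUPP;
    }

    int main(void)
    {
        /* A caller can read the counter unconditionally ... */
        printf("shared mfns: %u\n", mem_sharing_get_nr_shared_mfns());
        /* ... while the unshare path is only reached from dead branches. */
        return 0;
    }
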
@@ -127,6 +127,8 @@ struct page_info
/* For non-pinnable single-page shadows, a higher entry that points
* at us. */
paddr_t up;
+
+#ifdef CONFIG_MEM_SHARING
/* For shared/sharable pages, we use a doubly-linked list
* of all the {pfn,domain} pairs that map this page. We also include
* an opaque handle, which is effectively a version, so that clients
@@ -134,6 +136,7 @@ struct page_info
* This list is allocated and freed when a page is shared/unshared.
*/
struct page_sharing_info *sharing;
+#endif
};
/* Reference count and various PGC_xxx flags and fields. */
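Guarding the sharing pointer in struct page_info is harmless for the layout: it appears to sit in a union alongside the shadow "up" field (both are inside the braces closed just above), so compiling it out changes what the bits mean for shared pages but not the size of the structure. A toy illustration of that property, with invented field names:

    /*
     * Toy illustration (invented field names) of why compiling out one
     * union member does not change the structure's size as long as an
     * equally large member remains.
     */
    #include <stdio.h>
    #include <stdint.h>

    struct page_sharing_info;            /* only ever used as a pointer */

    struct toy_page_info {
        unsigned long count_info;
        union {
            uint64_t up;                 /* present in both configurations */
    #ifdef CONFIG_MEM_SHARING
            struct page_sharing_info *sharing;
    #endif
        } u;
    };

    int main(void)
    {
        /* Same result with or without CONFIG_MEM_SHARING defined,
         * on an LP64 target where pointers are 8 bytes wide. */
        printf("sizeof(struct toy_page_info) = %zu\n",
               sizeof(struct toy_page_info));
        return 0;
    }
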
@@ -275,7 +275,7 @@ struct npfec {
/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
extern struct domain *dom_xen, *dom_io;
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
extern struct domain *dom_cow;
#else
# define dom_cow NULL
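The dom_cow hunk keeps the existing fallback: with sharing compiled out, dom_cow is #defined to NULL, so ownership checks against it still compile and collapse to a constant false. A compact sketch of that NULL-fallback idiom (the helper name is invented for the sketch):

    /*
     * Sketch of the NULL-fallback idiom kept above: with the feature
     * compiled out, dom_cow is a constant NULL, so comparisons against
     * it still compile and fold to "false".
     */
    #include <stdio.h>
    #include <stddef.h>

    struct domain { int id; };

    #ifdef CONFIG_MEM_SHARING
    struct domain *dom_cow;     /* in Xen the real object lives alongside
                                 * dom_xen/dom_io (see the earlier hunk);
                                 * defined here so the sketch links. */
    #else
    # define dom_cow NULL
    #endif

    static int owned_by_cow(const struct domain *owner)
    {
        return owner == dom_cow;    /* folds to 0 when dom_cow is NULL */
    }

    int main(void)
    {
        struct domain d = { .id = 1 };
        printf("owned by dom_cow? %d\n", owned_by_cow(&d));
        return 0;
    }
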
@@ -459,7 +459,7 @@ struct domain
/* Various vm_events */
/* Memory sharing support */
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
struct vm_event_domain *vm_event_share;
#endif
/* Memory paging support */
@@ -597,7 +597,7 @@ static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
@@ -150,7 +150,7 @@ struct xsm_operations {
int (*mem_paging) (struct domain *d);
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
int (*mem_sharing) (struct domain *d);
#endif
@@ -597,7 +597,7 @@ static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
{
return xsm_ops->mem_sharing(d);
@@ -128,7 +128,7 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, mem_paging);
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
set_to_dummy_if_null(ops, mem_sharing);
#endif
@@ -1262,7 +1262,7 @@ static int flask_mem_paging(struct domain *d)
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
static int flask_mem_sharing(struct domain *d)
{
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
@@ -1829,7 +1829,7 @@ static struct xsm_operations flask_ops = {
.mem_paging = flask_mem_paging,
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
.mem_sharing = flask_mem_sharing,
#endif
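
Finally, the XSM hunks follow the existing pattern for optional hooks: the function-pointer slot in struct xsm_operations, its inline wrapper, the dummy fixup and the Flask implementation are all guarded by the same symbol, so a build without MEM_SHARING carries no sharing hook at all. A reduced sketch of that ops-table pattern (names and structure simplified from the XSM code):

    /*
     * Reduced sketch of the optional-hook pattern used by the XSM hunks:
     * the slot, its user and the dummy fixup share one CONFIG_* guard.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct domain;

    struct toy_xsm_ops {
        int (*mem_paging)(struct domain *d);
    #ifdef CONFIG_MEM_SHARING
        int (*mem_sharing)(struct domain *d);
    #endif
    };

    static int dummy_allow(struct domain *d)
    {
        (void)d;
        return 0;                   /* permissive default */
    }

    /* Simplified mirror of set_to_dummy_if_null(): fill unset hooks. */
    #define set_to_dummy_if_null(ops, hook) \
        do { if ( !(ops)->hook ) (ops)->hook = dummy_allow; } while ( 0 )

    static void fixup_ops(struct toy_xsm_ops *ops)
    {
        set_to_dummy_if_null(ops, mem_paging);
    #ifdef CONFIG_MEM_SHARING
        set_to_dummy_if_null(ops, mem_sharing);
    #endif
    }

    int main(void)
    {
        struct toy_xsm_ops ops = { 0 };
        fixup_ops(&ops);
        printf("mem_paging hook allows: %d\n", ops.mem_paging(NULL));
        return 0;
    }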