@@ -197,9 +197,6 @@ static shr_handle_t get_next_handle(void)
     return x + 1;
 }
 
-#define mem_sharing_enabled(d) \
-    (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
-
 static atomic_t nr_saved_mfns   = ATOMIC_INIT(0);
 static atomic_t nr_shared_mfns  = ATOMIC_INIT(0);
 
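This hunk only deletes the file-local copy of the predicate; the same macro reappears in asm/mem_sharing.h further down, so callers outside mem_sharing.c can use it too. A rough before/after sketch of a call site (do_work() is a placeholder, not a Xen function):

    /* before: the check is open-coded wherever it is needed */
    if ( is_hvm_domain(d) && d->arch.hvm.mem_sharing_enabled )
        do_work(d);

    /* after: one shared predicate; the explicit is_hvm_domain() test is
     * dropped on the assumption that .enabled is only ever set for
     * HAP-enabled HVM domains */
    if ( mem_sharing_enabled(d) )
        do_work(d);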
@@ -1297,6 +1294,7 @@ int __mem_sharing_unshare_page(struct domain *d,
 int relinquish_shared_pages(struct domain *d)
 {
     int rc = 0;
+    struct mem_sharing_domain *msd = &d->arch.hvm.mem_sharing;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     unsigned long gfn, count = 0;
 
@@ -1304,7 +1302,7 @@ int relinquish_shared_pages(struct domain *d)
         return 0;
 
     p2m_lock(p2m);
-    for ( gfn = p2m->next_shared_gfn_to_relinquish;
+    for ( gfn = msd->next_shared_gfn_to_relinquish;
           gfn <= p2m->max_mapped_pfn; gfn++ )
     {
         p2m_access_t a;
@@ -1339,7 +1337,7 @@ int relinquish_shared_pages(struct domain *d)
         {
             if ( hypercall_preempt_check() )
             {
-                p2m->next_shared_gfn_to_relinquish = gfn + 1;
+                msd->next_shared_gfn_to_relinquish = gfn + 1;
                 rc = -ERESTART;
                 break;
             }
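The two hunks above implement a resumable scan: when hypercall_preempt_check() fires, the loop parks gfn + 1 in the per-domain struct and bails out with -ERESTART so the hypercall can be restarted and continue where it left off. A minimal self-contained sketch of the pattern, with hypothetical names (not Xen code):

    #include <stdbool.h>

    #define ERESTART_SKETCH 85        /* arbitrary stand-in for Xen's -ERESTART */

    struct sharing_state { unsigned long next_gfn; };

    /* Scan gfns, persisting progress so an interrupted pass can resume. */
    static int relinquish_sketch(struct sharing_state *s, unsigned long max_gfn,
                                 bool (*preempt_pending)(void))
    {
        unsigned long gfn;

        for ( gfn = s->next_gfn; gfn <= max_gfn; gfn++ )
        {
            /* ... drop the shared mapping for this gfn here ... */

            if ( preempt_pending() )
            {
                s->next_gfn = gfn + 1;    /* resume after this gfn */
                return -ERESTART_SKETCH;  /* caller restarts the call later */
            }
        }

        s->next_gfn = 0;                  /* full pass completed */
        return 0;
    }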
@@ -1425,7 +1423,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 
     /* Only HAP is supported */
     rc = -ENODEV;
-    if ( !hap_enabled(d) || !d->arch.hvm.mem_sharing_enabled )
+    if ( !mem_sharing_enabled(d) )
         goto out;
 
     switch ( mso.op )
@@ -1498,8 +1498,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
     /* Prevent device assign if mem paging or mem sharing have been
      * enabled for this domain */
     if ( d != dom_io &&
-         unlikely((is_hvm_domain(d) &&
-                   d->arch.hvm.mem_sharing_enabled) ||
+         unlikely(mem_sharing_enabled(d) ||
                   vm_event_check_ring(d->vm_event_paging) ||
                   p2m_get_hostp2m(d)->global_logdirty) )
         return -EXDEV;
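For context (the rationale is not spelled out in the hunk itself): an assigned device can DMA straight into guest frames through the IOMMU, which cannot fault-and-unshare the way EPT write faults do for CoW-shared pages, so active sharing, paging, or global log-dirty are all grounds to refuse assignment. A hypothetical toolstack-side view of the -EXDEV contract this check enforces (report_assign_failure is illustrative, not a real libxl/libxc API):

    #include <stdio.h>

    static void report_assign_failure(unsigned int domid, int rc)
    {
        if ( rc == -18 /* -EXDEV */ )
            fprintf(stderr, "dom%u: PCI passthrough refused: mem sharing, "
                    "mem paging or global log-dirty is active\n", domid);
    }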
@@ -29,6 +29,7 @@
 #include <asm/hvm/viridian.h>
 #include <asm/hvm/vmx/vmcs.h>
 #include <asm/hvm/svm/vmcb.h>
+#include <asm/mem_sharing.h>
 #include <public/grant_table.h>
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
@@ -156,7 +157,6 @@ struct hvm_domain {
 
     struct viridian_domain *viridian;
 
-    bool_t                 mem_sharing_enabled;
     bool_t                 qemu_mapcache_invalidate;
     bool_t                 is_s3_suspended;
 
@@ -192,6 +192,10 @@ struct hvm_domain {
         struct vmx_domain vmx;
         struct svm_domain svm;
     };
+
+#ifdef CONFIG_MEM_SHARING
+    struct mem_sharing_domain mem_sharing;
+#endif
 };
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
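The new #include earlier in this file is not incidental: mem_sharing is embedded by value, so the compiler needs the complete definition of struct mem_sharing_domain to lay out struct hvm_domain; a forward declaration would only do for a pointer member. A minimal sketch of the distinction, with hypothetical types:

    struct by_ptr;                    /* forward declaration */

    struct by_value { int x; };       /* complete definition */

    struct outer {
        struct by_ptr   *p;           /* pointer member: forward decl suffices */
        struct by_value  v;           /* by-value member: the full definition
                                       * must be visible to size 'outer', which
                                       * is why the #include is added */
    };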
@@ -26,6 +26,19 @@
 
 #ifdef CONFIG_MEM_SHARING
 
+struct mem_sharing_domain
+{
+    bool enabled;
+
+    /*
+     * When releasing shared gfn's in a preemptible manner, recall where
+     * to resume the search.
+     */
+    unsigned long next_shared_gfn_to_relinquish;
+};
+
+#define mem_sharing_enabled(d) ((d)->arch.hvm.mem_sharing.enabled)
+
 /* Auditing of memory sharing code? */
 #ifndef NDEBUG
 #define MEM_SHARING_AUDIT 1
@@ -104,6 +117,8 @@ int relinquish_shared_pages(struct domain *d);
 
 #else
 
+#define mem_sharing_enabled(d) false
+
 static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
 {
     return 0;
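Defining the stub as plain false (rather than hiding call sites behind #ifdef) keeps callers such as assign_device() compilable in !CONFIG_MEM_SHARING builds while letting the optimizer delete the sharing-only paths. A self-contained sketch of the effect, with hypothetical stand-in types (not Xen code):

    #include <stdbool.h>

    #define mem_sharing_enabled(d) false     /* the !CONFIG_MEM_SHARING stub */

    struct domain_sketch { bool paging_active; };

    static bool paging_conflict(const struct domain_sketch *d)
    {
        return d->paging_active;
    }

    static bool device_assign_blocked(const struct domain_sketch *d)
    {
        /* The first disjunct is the constant 'false'; the compiler folds
         * it away, so the caller stays #ifdef-free in non-sharing builds. */
        return mem_sharing_enabled(d) || paging_conflict(d);
    }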
@@ -305,10 +305,6 @@ struct p2m_domain {
     unsigned long min_remapped_gfn;
     unsigned long max_remapped_gfn;
 
-    /* When releasing shared gfn's in a preemptible manner, recall where
-     * to resume the search */
-    unsigned long next_shared_gfn_to_relinquish;
-
 #ifdef CONFIG_HVM
     /* Populate-on-demand variables
      * All variables are protected with the pod lock. We cannot rely on
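Net effect of the relocation, shown side by side (the second line just spells out the patch's msd-> access):

    /* before: one cursor embedded in every struct p2m_domain, i.e.
     * duplicated for nested and altp2m tables even though only the host
     * p2m is ever scanned (see the p2m_get_hostp2m() call above) */
    p2m->next_shared_gfn_to_relinquish = gfn + 1;

    /* after: a single per-domain cursor */
    d->arch.hvm.mem_sharing.next_shared_gfn_to_relinquish = gfn + 1;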