@@ -197,9 +197,6 @@ static inline shr_handle_t get_next_handle(void)
return x + 1;
}
-#define mem_sharing_enabled(d) \
- (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
-
static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
static atomic_t nr_shared_mfns = ATOMIC_INIT(0);
@@ -1300,6 +1297,7 @@ int __mem_sharing_unshare_page(struct domain *d,
int relinquish_shared_pages(struct domain *d)
{
int rc = 0;
+ struct mem_sharing_domain *msd = &d->arch.hvm.mem_sharing;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
unsigned long gfn, count = 0;
@@ -1307,7 +1305,7 @@ int relinquish_shared_pages(struct domain *d)
return 0;
p2m_lock(p2m);
- for ( gfn = p2m->next_shared_gfn_to_relinquish;
+ for ( gfn = msd->next_shared_gfn_to_relinquish;
gfn <= p2m->max_mapped_pfn; gfn++ )
{
p2m_access_t a;
@@ -1342,7 +1340,7 @@ int relinquish_shared_pages(struct domain *d)
{
if ( hypercall_preempt_check() )
{
- p2m->next_shared_gfn_to_relinquish = gfn + 1;
+ msd->next_shared_gfn_to_relinquish = gfn + 1;
rc = -ERESTART;
break;
}
@@ -1428,7 +1426,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
/* Only HAP is supported */
rc = -ENODEV;
- if ( !hap_enabled(d) || !d->arch.hvm.mem_sharing_enabled )
+ if ( !mem_sharing_enabled(d) )
goto out;
switch ( mso.op )
@@ -1437,10 +1435,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
{
shr_handle_t handle;
- rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
-
rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle);
mso.u.nominate.handle = handle;
}
@@ -1452,9 +1446,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
gfn_t gfn;
shr_handle_t handle;
- rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL);
if ( rc < 0 )
goto out;
@@ -1470,10 +1461,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
struct domain *cd;
shr_handle_t sh, ch;
- rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
-
rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
&cd);
if ( rc )
@@ -1540,10 +1527,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
struct domain *cd;
shr_handle_t sh;
- rc = -EINVAL;
- if ( !mem_sharing_enabled(d) )
- goto out;
-
rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
&cd);
if ( rc )
@@ -1602,9 +1585,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
mso.u.range.opaque > mso.u.range.last_gfn) )
goto out;
- if ( !mem_sharing_enabled(d) )
- goto out;
-
rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain,
&cd);
if ( rc )
@@ -1708,7 +1688,7 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
if ( unlikely(is_iommu_enabled(d) && mec->u.enable) )
rc = -EXDEV;
else
- d->arch.hvm.mem_sharing_enabled = mec->u.enable;
+ d->arch.hvm.mem_sharing.enabled = mec->u.enable;
}
break;
@@ -1498,8 +1498,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
/* Prevent device assign if mem paging or mem sharing have been
* enabled for this domain */
if ( d != dom_io &&
- unlikely((is_hvm_domain(d) &&
- d->arch.hvm.mem_sharing_enabled) ||
+ unlikely(mem_sharing_enabled(d) ||
vm_event_check_ring(d->vm_event_paging) ||
p2m_get_hostp2m(d)->global_logdirty) )
return -EXDEV;
@@ -29,6 +29,7 @@
#include <asm/hvm/viridian.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/svm/vmcb.h>
+#include <asm/mem_sharing.h>
#include <public/grant_table.h>
#include <public/hvm/params.h>
#include <public/hvm/save.h>
@@ -156,7 +157,6 @@ struct hvm_domain {
struct viridian_domain *viridian;
- bool_t mem_sharing_enabled;
bool_t qemu_mapcache_invalidate;
bool_t is_s3_suspended;
@@ -192,6 +192,10 @@ struct hvm_domain {
struct vmx_domain vmx;
struct svm_domain svm;
};
+
+#ifdef CONFIG_MEM_SHARING
+ struct mem_sharing_domain mem_sharing;
+#endif
};
#endif /* __ASM_X86_HVM_DOMAIN_H__ */
@@ -26,6 +26,20 @@
#ifdef CONFIG_MEM_SHARING
+struct mem_sharing_domain
+{
+ bool enabled;
+
+ /*
+ * When releasing shared gfns in a preemptible manner, recall where
+ * to resume the search.
+ */
+ unsigned long next_shared_gfn_to_relinquish;
+};
+
+#define mem_sharing_enabled(d) \
+ (hap_enabled(d) && (d)->arch.hvm.mem_sharing.enabled)
+
/* Auditing of memory sharing code? */
#ifndef NDEBUG
#define MEM_SHARING_AUDIT 1
@@ -105,6 +119,8 @@ int relinquish_shared_pages(struct domain *d);
#else
+#define mem_sharing_enabled(d) false
+
static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
{
return 0;
@@ -305,10 +305,6 @@ struct p2m_domain {
unsigned long min_remapped_gfn;
unsigned long max_remapped_gfn;
- /* When releasing shared gfn's in a preemptible manner, recall where
- * to resume the search */
- unsigned long next_shared_gfn_to_relinquish;
-
#ifdef CONFIG_HVM
/* Populate-on-demand variables
* All variables are protected with the pod lock. We cannot rely on
Create struct mem_sharing_domain under hvm_domain and move mem sharing variables into it from p2m_domain and hvm_domain. Expose the mem_sharing_enabled macro to be used consistently across Xen. Remove some duplicate calls to mem_sharing_enabled in mem_sharing.c Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com> --- xen/arch/x86/mm/mem_sharing.c | 30 +++++------------------------- xen/drivers/passthrough/pci.c | 3 +-- xen/include/asm-x86/hvm/domain.h | 6 +++++- xen/include/asm-x86/mem_sharing.h | 16 ++++++++++++++++ xen/include/asm-x86/p2m.h | 4 ---- 5 files changed, 27 insertions(+), 32 deletions(-)