--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -954,7 +954,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
unsigned long put_count = 0;
get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
- cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
+ cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg, true);
/*
* This tricky business is to avoid two callers deadlocking if
@@ -1068,7 +1068,7 @@ err_out:
}
int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh,
- struct domain *cd, unsigned long cgfn)
+ struct domain *cd, unsigned long cgfn, bool lock)
{
struct page_info *spage;
int ret = -EINVAL;
@@ -1080,7 +1080,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
struct two_gfns tg;
get_two_gfns(sd, _gfn(sgfn), &smfn_type, NULL, &smfn,
- cd, _gfn(cgfn), &cmfn_type, &a, &cmfn, 0, &tg);
+ cd, _gfn(cgfn), &cmfn_type, &a, &cmfn, 0, &tg, lock);
/* Get the source shared page, check and lock */
ret = XENMEM_SHARING_OP_S_HANDLE_INVALID;
@@ -1155,7 +1155,8 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
err_unlock:
mem_sharing_page_unlock(spage);
err_out:
- put_two_gfns(&tg);
+ if ( lock )
+ put_two_gfns(&tg);
return ret;
}
@@ -1574,7 +1575,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
sh = mso.u.share.source_handle;
cgfn = mso.u.share.client_gfn;
- rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
+ rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn, true);
rcu_unlock_domain(cd);
}
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -539,7 +539,7 @@ struct two_gfns {
static inline void get_two_gfns(struct domain *rd, gfn_t rgfn,
p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld,
gfn_t lgfn, p2m_type_t *lt, p2m_access_t *la, mfn_t *lmfn,
- p2m_query_t q, struct two_gfns *rval)
+ p2m_query_t q, struct two_gfns *rval, bool lock)
{
mfn_t *first_mfn, *second_mfn, scratch_mfn;
p2m_access_t *first_a, *second_a, scratch_a;
@@ -569,10 +569,10 @@ do { \
#undef assign_pointers
/* Now do the gets */
- *first_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
- gfn_x(rval->first_gfn), first_t, first_a, q, NULL);
- *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
- gfn_x(rval->second_gfn), second_t, second_a, q, NULL);
+ *first_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
+ gfn_x(rval->first_gfn), first_t, first_a, q, NULL, lock);
+ *second_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
+ gfn_x(rval->second_gfn), second_t, second_a, q, NULL, lock);
}
static inline void put_two_gfns(struct two_gfns *arg)
During VM forking the client lock will already be taken.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
---
 xen/arch/x86/mm/mem_sharing.c | 11 ++++++-----
 xen/include/asm-x86/p2m.h     | 10 +++++-----
 2 files changed, 11 insertions(+), 10 deletions(-)
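
For illustration only (not part of this patch): a minimal sketch of how a fork-path caller that already holds the client's p2m lock might use the new parameter. The function fork_share_gfn below is hypothetical; the existing callers converted above keep the current behaviour by passing true.

/*
 * Hypothetical fork-side caller: the client (fork) p2m lock is assumed to
 * already be held by the forking code, so pass lock=false.  With lock=false,
 * get_two_gfns() takes the unlocked lookup path and the error path skips
 * put_two_gfns().
 */
static int fork_share_gfn(struct domain *parent, unsigned long sgfn,
                          shr_handle_t sh, struct domain *fork,
                          unsigned long cgfn)
{
    return mem_sharing_add_to_physmap(parent, sgfn, sh, fork, cgfn,
                                      false /* lock already held */);
}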