@@ -1879,12 +1879,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
if ( npfec.write_access && (p2mt == p2m_ram_shared) )
{
ASSERT(p2m_is_hostp2m(p2m));
- sharing_enomem =
- (mem_sharing_unshare_page(currd, gfn, 0) < 0);
+ sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0);
rc = 1;
goto out_put_gfn;
}
-
+
/* Spurious fault? PoD and log-dirty also take this path. */
if ( p2m_is_ram(p2mt) )
{
@@ -1930,9 +1929,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
__put_gfn(p2m, gfn);
__put_gfn(hostp2m, gfn);
out:
- /* All of these are delayed until we exit, since we might
+ /*
+ * All of these are delayed until we exit, since we might
* sleep on event ring wait queues, and we must not hold
- * locks in such circumstance */
+ * locks in such circumstance.
+ */
if ( paged )
p2m_mem_paging_populate(currd, gfn);
if ( sharing_enomem )
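
A note on the shape of this out-path: inside the locked region the fault handler only records that an unshare ran out of memory (or that a page needs paging in); the calls that may sleep on event-ring wait queues are issued once the gfn references have been dropped. A simplified sketch of that pattern, in which the mem_sharing_notify_enomem() follow-up under "if ( sharing_enomem )" is assumed from the surrounding function rather than visible in this hunk:

    int sharing_enomem = 0;

    /* ... with the gfn/p2m references still held ... */
    if ( npfec.write_access && p2mt == p2m_ram_shared )
        sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0);

    __put_gfn(hostp2m, gfn);        /* drop the reference before sleeping */

    if ( sharing_enomem )
        /* May block on the vm_event ring; no locks may be held here. */
        mem_sharing_notify_enomem(currd, gfn, true /* allow_sleep */);
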
@@ -59,8 +59,10 @@ static DEFINE_PER_CPU(pg_lock_data_t, __pld);
#define RMAP_USES_HASHTAB(page) \
((page)->sharing->hash_table.flag == NULL)
#define RMAP_HEAVY_SHARED_PAGE RMAP_HASHTAB_SIZE
-/* A bit of hysteresis. We don't want to be mutating between list and hash
- * table constantly. */
+/*
+ * A bit of hysteresis. We don't want to be mutating between list and hash
+ * table constantly.
+ */
#define RMAP_LIGHT_SHARED_PAGE (RMAP_HEAVY_SHARED_PAGE >> 2)
#if MEM_SHARING_AUDIT
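
The gap between the two thresholds is the hysteresis the comment refers to: the rmap is upgraded to a hash table only once the sharing degree reaches RMAP_HEAVY_SHARED_PAGE, and falls back to a plain list only once it drops to RMAP_LIGHT_SHARED_PAGE (a quarter of that), so a page hovering around a single cut-off does not convert back and forth. Roughly the checks performed by rmap_add()/rmap_del() further down (rmap_del()'s convert flag omitted here):

    /* Growing: switch to the hash table past the heavy threshold. */
    if ( !RMAP_USES_HASHTAB(page) &&
         rmap_count(page) >= RMAP_HEAVY_SHARED_PAGE )
        (void)rmap_list_to_hash_table(page);

    /* Shrinking: return to a plain list once below the light threshold. */
    if ( RMAP_USES_HASHTAB(page) &&
         rmap_count(page) <= RMAP_LIGHT_SHARED_PAGE )
        rmap_hash_table_to_list(page);
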
@@ -88,7 +90,7 @@ static inline void page_sharing_dispose(struct page_info *page)
{
/* Unlikely given our thresholds, but we should be careful. */
if ( unlikely(RMAP_USES_HASHTAB(page)) )
- free_xenheap_pages(page->sharing->hash_table.bucket,
+ free_xenheap_pages(page->sharing->hash_table.bucket,
RMAP_HASHTAB_ORDER);
spin_lock(&shr_audit_lock);
@@ -105,7 +107,7 @@ static inline void page_sharing_dispose(struct page_info *page)
{
/* Unlikely given our thresholds, but we should be careful. */
if ( unlikely(RMAP_USES_HASHTAB(page)) )
- free_xenheap_pages(page->sharing->hash_table.bucket,
+ free_xenheap_pages(page->sharing->hash_table.bucket,
RMAP_HASHTAB_ORDER);
xfree(page->sharing);
}
@@ -122,8 +124,8 @@ static inline void page_sharing_dispose(struct page_info *page)
* Nesting may happen when sharing (and locking) two pages.
* Deadlock is avoided by locking pages in increasing order.
* All memory sharing code paths take the p2m lock of the affected gfn before
- * taking the lock for the underlying page. We enforce ordering between page_lock
- * and p2m_lock using an mm-locks.h construct.
+ * taking the lock for the underlying page. We enforce ordering between
+ * page_lock and p2m_lock using an mm-locks.h construct.
*
* TODO: Investigate if PGT_validated is necessary.
*/
@@ -168,7 +170,7 @@ static inline bool mem_sharing_page_lock(struct page_info *pg)
if ( rc )
{
preempt_disable();
- page_sharing_mm_post_lock(&pld->mm_unlock_level,
+ page_sharing_mm_post_lock(&pld->mm_unlock_level,
&pld->recurse_count);
}
return rc;
@@ -178,7 +180,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
{
pg_lock_data_t *pld = &(this_cpu(__pld));
- page_sharing_mm_unlock(pld->mm_unlock_level,
+ page_sharing_mm_unlock(pld->mm_unlock_level,
&pld->recurse_count);
preempt_enable();
_page_unlock(pg);
@@ -186,7 +188,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
static inline shr_handle_t get_next_handle(void)
{
- /* Get the next handle get_page style */
+ /* Get the next handle get_page style */
uint64_t x, y = next_handle;
do {
x = y;
@@ -198,24 +200,26 @@ static inline shr_handle_t get_next_handle(void)
#define mem_sharing_enabled(d) \
(is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
-static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
+static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
static atomic_t nr_shared_mfns = ATOMIC_INIT(0);
-/** Reverse map **/
-/* Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
+/*
+ * Reverse map
+ *
+ * Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
* this shared frame backs. For pages with a low degree of sharing, a O(n)
* search linked list is good enough. For pages with higher degree of sharing,
- * we use a hash table instead. */
+ * we use a hash table instead.
+ */
typedef struct gfn_info
{
unsigned long gfn;
- domid_t domain;
+ domid_t domain;
struct list_head list;
} gfn_info_t;
-static inline void
-rmap_init(struct page_info *page)
+static inline void rmap_init(struct page_info *page)
{
/* We always start off as a doubly linked list. */
INIT_LIST_HEAD(&page->sharing->gfns);
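
In the list form a lookup is a linear scan for the matching <domain, gfn> tuple; the hash-table form first selects a bucket via HASH() and scans only that bucket. A minimal sketch of the list-side scan (hypothetical helper; the real walk is rmap_retrieve() below):

    static gfn_info_t *rmap_find_in_list(struct list_head *head,
                                         domid_t domain_id, unsigned long gfn)
    {
        struct list_head *le;

        list_for_each(le, head)
        {
            gfn_info_t *gfn_info = list_entry(le, gfn_info_t, list);

            if ( gfn_info->domain == domain_id && gfn_info->gfn == gfn )
                return gfn_info;
        }

        return NULL;
    }
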
@@ -225,10 +229,11 @@ rmap_init(struct page_info *page)
#define HASH(domain, gfn) \
(((gfn) + (domain)) % RMAP_HASHTAB_SIZE)
-/* Conversions. Tuned by the thresholds. Should only happen twice
- * (once each) during the lifetime of a shared page */
-static inline int
-rmap_list_to_hash_table(struct page_info *page)
+/*
+ * Conversions. Tuned by the thresholds. Should only happen twice
+ * (once each) during the lifetime of a shared page.
+ */
+static inline int rmap_list_to_hash_table(struct page_info *page)
{
unsigned int i;
struct list_head *pos, *tmp, *b =
@@ -254,8 +259,7 @@ rmap_list_to_hash_table(struct page_info *page)
return 0;
}
-static inline void
-rmap_hash_table_to_list(struct page_info *page)
+static inline void rmap_hash_table_to_list(struct page_info *page)
{
unsigned int i;
struct list_head *bucket = page->sharing->hash_table.bucket;
@@ -276,8 +280,7 @@ rmap_hash_table_to_list(struct page_info *page)
}
/* Generic accessors to the rmap */
-static inline unsigned long
-rmap_count(struct page_info *pg)
+static inline unsigned long rmap_count(struct page_info *pg)
{
unsigned long count;
unsigned long t = read_atomic(&pg->u.inuse.type_info);
@@ -287,11 +290,13 @@ rmap_count(struct page_info *pg)
return count;
}
-/* The page type count is always decreased after removing from the rmap.
- * Use a convert flag to avoid mutating the rmap if in the middle of an
- * iterator, or if the page will be soon destroyed anyways. */
-static inline void
-rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
+/*
+ * The page type count is always decreased after removing from the rmap.
+ * Use a convert flag to avoid mutating the rmap if in the middle of an
+ * iterator, or if the page will be soon destroyed anyways.
+ */
+static inline
+void rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
{
if ( RMAP_USES_HASHTAB(page) && convert &&
(rmap_count(page) <= RMAP_LIGHT_SHARED_PAGE) )
@@ -302,8 +307,7 @@ rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
}
/* The page type count is always increased before adding to the rmap. */
-static inline void
-rmap_add(gfn_info_t *gfn_info, struct page_info *page)
+static inline void rmap_add(gfn_info_t *gfn_info, struct page_info *page)
{
struct list_head *head;
@@ -314,7 +318,7 @@ rmap_add(gfn_info_t *gfn_info, struct page_info *page)
(void)rmap_list_to_hash_table(page);
head = (RMAP_USES_HASHTAB(page)) ?
- page->sharing->hash_table.bucket +
+ page->sharing->hash_table.bucket +
HASH(gfn_info->domain, gfn_info->gfn) :
&page->sharing->gfns;
@@ -322,9 +326,9 @@ rmap_add(gfn_info_t *gfn_info, struct page_info *page)
list_add(&gfn_info->list, head);
}
-static inline gfn_info_t *
-rmap_retrieve(uint16_t domain_id, unsigned long gfn,
- struct page_info *page)
+static inline
+gfn_info_t *rmap_retrieve(uint16_t domain_id, unsigned long gfn,
+ struct page_info *page)
{
gfn_info_t *gfn_info;
struct list_head *le, *head;
@@ -364,18 +368,18 @@ struct rmap_iterator {
unsigned int bucket;
};
-static inline void
-rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
+static inline
+void rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
{
ri->curr = (RMAP_USES_HASHTAB(page)) ?
page->sharing->hash_table.bucket :
&page->sharing->gfns;
- ri->next = ri->curr->next;
+ ri->next = ri->curr->next;
ri->bucket = 0;
}
-static inline gfn_info_t *
-rmap_iterate(struct page_info *page, struct rmap_iterator *ri)
+static inline
+gfn_info_t *rmap_iterate(struct page_info *page, struct rmap_iterator *ri)
{
struct list_head *head = (RMAP_USES_HASHTAB(page)) ?
page->sharing->hash_table.bucket + ri->bucket :
@@ -405,14 +409,14 @@ retry:
return list_entry(ri->curr, gfn_info_t, list);
}
-static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
- struct domain *d,
- unsigned long gfn)
+static inline
+gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page, struct domain *d,
+ unsigned long gfn)
{
gfn_info_t *gfn_info = xmalloc(gfn_info_t);
if ( gfn_info == NULL )
- return NULL;
+ return NULL;
gfn_info->gfn = gfn;
gfn_info->domain = d->domain_id;
@@ -425,9 +429,9 @@ static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
return gfn_info;
}
-static inline void mem_sharing_gfn_destroy(struct page_info *page,
- struct domain *d,
- gfn_info_t *gfn_info)
+static inline
+void mem_sharing_gfn_destroy(struct page_info *page, struct domain *d,
+ gfn_info_t *gfn_info)
{
/* Decrement the number of pages. */
atomic_dec(&d->shr_pages);
@@ -437,25 +441,29 @@ static inline void mem_sharing_gfn_destroy(struct page_info *page,
xfree(gfn_info);
}
-static struct page_info* mem_sharing_lookup(unsigned long mfn)
+static inline struct page_info* mem_sharing_lookup(unsigned long mfn)
{
- if ( mfn_valid(_mfn(mfn)) )
- {
- struct page_info* page = mfn_to_page(_mfn(mfn));
- if ( page_get_owner(page) == dom_cow )
- {
- /* Count has to be at least two, because we're called
- * with the mfn locked (1) and this is supposed to be
- * a shared page (1). */
- unsigned long t = read_atomic(&page->u.inuse.type_info);
- ASSERT((t & PGT_type_mask) == PGT_shared_page);
- ASSERT((t & PGT_count_mask) >= 2);
- ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
- return page;
- }
- }
+ struct page_info* page;
+ unsigned long t;
- return NULL;
+ if ( !mfn_valid(_mfn(mfn)) )
+ return NULL;
+
+ page = mfn_to_page(_mfn(mfn));
+ if ( page_get_owner(page) != dom_cow )
+ return NULL;
+
+ /*
+ * Count has to be at least two, because we're called
+ * with the mfn locked (1) and this is supposed to be
+ * a shared page (1).
+ */
+ t = read_atomic(&page->u.inuse.type_info);
+ ASSERT((t & PGT_type_mask) == PGT_shared_page);
+ ASSERT((t & PGT_count_mask) >= 2);
+ ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
+
+ return page;
}
static int audit(void)
@@ -492,7 +500,7 @@ static int audit(void)
continue;
}
- /* Check if the MFN has correct type, owner and handle. */
+ /* Check if the MFN has correct type, owner and handle. */
if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_shared_page )
{
MEM_SHARING_DEBUG("mfn %lx in audit list, but not PGT_shared_page (%lx)!\n",
@@ -545,7 +553,7 @@ static int audit(void)
errors++;
continue;
}
- o_mfn = get_gfn_query_unlocked(d, g->gfn, &t);
+ o_mfn = get_gfn_query_unlocked(d, g->gfn, &t);
if ( !mfn_eq(o_mfn, mfn) )
{
MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx."
@@ -568,7 +576,7 @@ static int audit(void)
{
MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx."
"nr_gfns in list %lu, in type_info %lx\n",
- mfn_x(mfn), nr_gfns,
+ mfn_x(mfn), nr_gfns,
(pg->u.inuse.type_info & PGT_count_mask));
errors++;
}
@@ -603,7 +611,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
.u.mem_sharing.p2mt = p2m_ram_shared
};
- if ( (rc = __vm_event_claim_slot(d,
+ if ( (rc = __vm_event_claim_slot(d,
d->vm_event_share, allow_sleep)) < 0 )
return rc;
@@ -629,9 +637,9 @@ unsigned int mem_sharing_get_nr_shared_mfns(void)
}
/* Functions that change a page's type and ownership */
-static int page_make_sharable(struct domain *d,
- struct page_info *page,
- int expected_refcnt)
+static int page_make_sharable(struct domain *d,
+ struct page_info *page,
+ int expected_refcnt)
{
bool_t drop_dom_ref;
@@ -658,8 +666,10 @@ static int page_make_sharable(struct domain *d,
return -EEXIST;
}
- /* Check if the ref count is 2. The first from PGC_allocated, and
- * the second from get_page_and_type at the top of this function */
+ /*
+ * Check if the ref count is 2. The first from PGC_allocated, and
+ * the second from get_page_and_type at the top of this function.
+ */
if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) )
{
spin_unlock(&d->page_alloc_lock);
@@ -675,6 +685,7 @@ static int page_make_sharable(struct domain *d,
if ( drop_dom_ref )
put_domain(d);
+
return 0;
}
@@ -684,7 +695,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
if ( !get_page(page, dom_cow) )
return -EINVAL;
-
+
spin_lock(&d->page_alloc_lock);
if ( d->is_dying )
@@ -727,10 +738,13 @@ static inline struct page_info *__grab_shared_page(mfn_t mfn)
if ( !mfn_valid(mfn) )
return NULL;
+
pg = mfn_to_page(mfn);
- /* If the page is not validated we can't lock it, and if it's
- * not validated it's obviously not shared. */
+ /*
+ * If the page is not validated we can't lock it, and if it's
+ * not validated it's obviously not shared.
+ */
if ( !mem_sharing_page_lock(pg) )
return NULL;
@@ -754,10 +768,10 @@ static int debug_mfn(mfn_t mfn)
return -EINVAL;
}
- MEM_SHARING_DEBUG(
+ MEM_SHARING_DEBUG(
"Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n",
- mfn_x(page_to_mfn(page)),
- page->count_info,
+ mfn_x(page_to_mfn(page)),
+ page->count_info,
page->u.inuse.type_info,
page_get_owner(page)->domain_id);
@@ -775,7 +789,7 @@ static int debug_gfn(struct domain *d, gfn_t gfn)
mfn = get_gfn_query(d, gfn_x(gfn), &p2mt);
- MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n",
+ MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n",
d->domain_id, gfn_x(gfn));
num_refs = debug_mfn(mfn);
put_gfn(d, gfn_x(gfn));
@@ -796,9 +810,9 @@ static int debug_gref(struct domain *d, grant_ref_t ref)
d->domain_id, ref, rc);
return rc;
}
-
+
MEM_SHARING_DEBUG(
- "==> Grant [dom=%d,ref=%d], status=%x. ",
+ "==> Grant [dom=%d,ref=%d], status=%x. ",
d->domain_id, ref, status);
return debug_gfn(d, gfn);
@@ -824,15 +838,12 @@ static int nominate_page(struct domain *d, gfn_t gfn,
goto out;
/* Return the handle if the page is already shared */
- if ( p2m_is_shared(p2mt) ) {
+ if ( p2m_is_shared(p2mt) )
+ {
struct page_info *pg = __grab_shared_page(mfn);
if ( !pg )
- {
- gprintk(XENLOG_ERR,
- "Shared p2m entry gfn %" PRI_gfn ", but could not grab mfn %" PRI_mfn " dom%d\n",
- gfn_x(gfn), mfn_x(mfn), d->domain_id);
BUG();
- }
+
*phandle = pg->sharing->handle;
ret = 0;
mem_sharing_page_unlock(pg);
@@ -843,7 +854,6 @@ static int nominate_page(struct domain *d, gfn_t gfn,
if ( !p2m_is_sharable(p2mt) )
goto out;
-#ifdef CONFIG_HVM
/* Check if there are mem_access/remapped altp2m entries for this page */
if ( altp2m_active(d) )
{
@@ -872,42 +882,42 @@ static int nominate_page(struct domain *d, gfn_t gfn,
altp2m_list_unlock(d);
}
-#endif
/* Try to convert the mfn to the sharable type */
page = mfn_to_page(mfn);
- ret = page_make_sharable(d, page, expected_refcnt);
- if ( ret )
+ ret = page_make_sharable(d, page, expected_refcnt);
+ if ( ret )
goto out;
- /* Now that the page is validated, we can lock it. There is no
- * race because we're holding the p2m entry, so no one else
- * could be nominating this gfn */
+ /*
+ * Now that the page is validated, we can lock it. There is no
+ * race because we're holding the p2m entry, so no one else
+ * could be nominating this gfn.
+ */
ret = -ENOENT;
if ( !mem_sharing_page_lock(page) )
goto out;
/* Initialize the shared state */
ret = -ENOMEM;
- if ( (page->sharing =
- xmalloc(struct page_sharing_info)) == NULL )
+ if ( !(page->sharing = xmalloc(struct page_sharing_info)) )
{
/* Making a page private atomically unlocks it */
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page));
goto out;
}
page->sharing->pg = page;
rmap_init(page);
/* Create the handle */
- page->sharing->handle = get_next_handle();
+ page->sharing->handle = get_next_handle();
/* Create the local gfn info */
- if ( mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) == NULL )
+ if ( !mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) )
{
xfree(page->sharing);
page->sharing = NULL;
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page));
goto out;
}
@@ -946,15 +956,19 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
- /* This tricky business is to avoid two callers deadlocking if
- * grabbing pages in opposite client/source order */
+ /*
+ * This tricky business is to avoid two callers deadlocking if
+ * grabbing pages in opposite client/source order.
+ */
if ( mfn_eq(smfn, cmfn) )
{
- /* The pages are already the same. We could return some
+ /*
+ * The pages are already the same. We could return some
* kind of error here, but no matter how you look at it,
* the pages are already 'shared'. It possibly represents
* a big problem somewhere else, but as far as sharing is
- * concerned: great success! */
+ * concerned: great success!
+ */
ret = 0;
goto err_out;
}
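
get_two_gfns() is what provides that ordering: if one caller shares (source, client) while another shares the same pair the other way round, taking the two gfn locks in argument order could deadlock, so both lookups are performed in one canonical order. A sketch of the ordering predicate only (hypothetical helper, not the actual get_two_gfns() implementation):

    /* Order <domain, gfn> pairs so all callers lock in the same order. */
    static bool gfn_lock_first(const struct domain *da, unsigned long ga,
                               const struct domain *db, unsigned long gb)
    {
        return (da->domain_id < db->domain_id) ||
               (da->domain_id == db->domain_id && ga <= gb);
    }
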
@@ -1010,11 +1024,15 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
rmap_seed_iterator(cpage, &ri);
while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
{
- /* Get the source page and type, this should never fail:
- * we are under shr lock, and got a successful lookup */
+ /*
+ * Get the source page and type, this should never fail:
+ * we are under shr lock, and got a successful lookup.
+ */
BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
- /* Move the gfn_info from client list to source list.
- * Don't change the type of rmap for the client page. */
+ /*
+ * Move the gfn_info from client list to source list.
+ * Don't change the type of rmap for the client page.
+ */
rmap_del(gfn, cpage, 0);
rmap_add(gfn, spage);
put_count++;
@@ -1043,14 +1061,14 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
atomic_dec(&nr_shared_mfns);
atomic_inc(&nr_saved_mfns);
ret = 0;
-
+
err_out:
put_two_gfns(&tg);
return ret;
}
int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh,
- struct domain *cd, unsigned long cgfn)
+ struct domain *cd, unsigned long cgfn)
{
struct page_info *spage;
int ret = -EINVAL;
@@ -1069,15 +1087,18 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
spage = __grab_shared_page(smfn);
if ( spage == NULL )
goto err_out;
+
ASSERT(smfn_type == p2m_ram_shared);
/* Check that the handles match */
if ( spage->sharing->handle != sh )
goto err_unlock;
- /* Make sure the target page is a hole in the physmap. These are typically
+ /*
+ * Make sure the target page is a hole in the physmap. These are typically
* p2m_mmio_dm, but also accept p2m_invalid and paged out pages. See the
- * definition of p2m_is_hole in p2m.h. */
+ * definition of p2m_is_hole in p2m.h.
+ */
if ( !p2m_is_hole(cmfn_type) )
{
ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
@@ -1086,7 +1107,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
/* This is simpler than regular sharing */
BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
- if ( (gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) == NULL )
+ if ( !(gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) )
{
put_page_and_type(spage);
ret = -ENOMEM;
@@ -1102,11 +1123,17 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
mem_sharing_gfn_destroy(spage, cd, gfn_info);
put_page_and_type(spage);
} else {
- /* There is a chance we're plugging a hole where a paged out page was */
+ /*
+ * There is a chance we're plugging a hole where a paged out
+ * page was.
+ */
if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) )
{
atomic_dec(&cd->paged_pages);
- /* Further, there is a chance this was a valid page. Don't leak it. */
+ /*
+ * Further, there is a chance this was a valid page.
+ * Don't leak it.
+ */
if ( mfn_valid(cmfn) )
{
struct page_info *cpage = mfn_to_page(cmfn);
@@ -1133,13 +1160,14 @@ err_out:
}
-/* A note on the rationale for unshare error handling:
+/*
+ * A note on the rationale for unshare error handling:
* 1. Unshare can only fail with ENOMEM. Any other error conditions BUG_ON()'s
* 2. We notify a potential dom0 helper through a vm_event ring. But we
- * allow the notification to not go to sleep. If the event ring is full
+ * allow the notification to not go to sleep. If the event ring is full
* of ENOMEM warnings, then it's on the ball.
* 3. We cannot go to sleep until the unshare is resolved, because we might
- * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy)
+ * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy)
* 4. So, we make sure we:
* 4.1. return an error
* 4.2. do not corrupt shared memory
@@ -1147,19 +1175,20 @@ err_out:
* 4.4. let the guest deal with it if the error propagation will reach it
*/
int __mem_sharing_unshare_page(struct domain *d,
- unsigned long gfn,
- uint16_t flags)
+ unsigned long gfn,
+ uint16_t flags)
{
p2m_type_t p2mt;
mfn_t mfn;
struct page_info *page, *old_page;
int last_gfn;
gfn_info_t *gfn_info = NULL;
-
+
mfn = get_gfn(d, gfn, &p2mt);
-
+
/* Has someone already unshared it? */
- if ( !p2m_is_shared(p2mt) ) {
+ if ( !p2m_is_shared(p2mt) )
+ {
put_gfn(d, gfn);
return 0;
}
@@ -1167,26 +1196,30 @@ int __mem_sharing_unshare_page(struct domain *d,
page = __grab_shared_page(mfn);
if ( page == NULL )
{
- gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: "
- "%lx\n", gfn);
+ gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: %lx\n",
+ gfn);
BUG();
}
gfn_info = rmap_retrieve(d->domain_id, gfn, page);
if ( unlikely(gfn_info == NULL) )
{
- gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: "
- "%lx\n", gfn);
+ gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: %lx\n",
+ gfn);
BUG();
}
- /* Do the accounting first. If anything fails below, we have bigger
- * bigger fish to fry. First, remove the gfn from the list. */
+ /*
+ * Do the accounting first. If anything fails below, we have bigger
+ * fish to fry. First, remove the gfn from the list.
+ */
last_gfn = rmap_has_one_entry(page);
if ( last_gfn )
{
- /* Clean up shared state. Get rid of the <domid, gfn> tuple
- * before destroying the rmap. */
+ /*
+ * Clean up shared state. Get rid of the <domid, gfn> tuple
+ * before destroying the rmap.
+ */
mem_sharing_gfn_destroy(page, d, gfn_info);
page_sharing_dispose(page);
page->sharing = NULL;
@@ -1195,8 +1228,10 @@ int __mem_sharing_unshare_page(struct domain *d,
else
atomic_dec(&nr_saved_mfns);
- /* If the GFN is getting destroyed drop the references to MFN
- * (possibly freeing the page), and exit early */
+ /*
+ * If the GFN is getting destroyed drop the references to MFN
+ * (possibly freeing the page), and exit early.
+ */
if ( flags & MEM_SHARING_DESTROY_GFN )
{
if ( !last_gfn )
@@ -1212,7 +1247,7 @@ int __mem_sharing_unshare_page(struct domain *d,
return 0;
}
-
+
if ( last_gfn )
{
/* Making a page private atomically unlocks it */
@@ -1222,14 +1257,16 @@ int __mem_sharing_unshare_page(struct domain *d,
old_page = page;
page = alloc_domheap_page(d, 0);
- if ( !page )
+ if ( !page )
{
/* Undo dec of nr_saved_mfns, as the retry will decrease again. */
atomic_inc(&nr_saved_mfns);
mem_sharing_page_unlock(old_page);
put_gfn(d, gfn);
- /* Caller is responsible for placing an event
- * in the ring */
+ /*
+ * Caller is responsible for placing an event
+ * in the ring.
+ */
return -ENOMEM;
}
@@ -1240,11 +1277,11 @@ int __mem_sharing_unshare_page(struct domain *d,
mem_sharing_page_unlock(old_page);
put_page_and_type(old_page);
-private_page_found:
+private_page_found:
if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) )
{
- gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n",
- d->domain_id, gfn);
+ gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n",
+ d->domain_id, gfn);
BUG();
}
@@ -1277,20 +1314,23 @@ int relinquish_shared_pages(struct domain *d)
mfn_t mfn;
int set_rc;
- if ( atomic_read(&d->shr_pages) == 0 )
+ if ( !atomic_read(&d->shr_pages) )
break;
+
mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL);
- if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
+ if ( mfn_valid(mfn) && t == p2m_ram_shared )
{
/* Does not fail with ENOMEM given the DESTROY flag */
- BUG_ON(__mem_sharing_unshare_page(d, gfn,
- MEM_SHARING_DESTROY_GFN));
- /* Clear out the p2m entry so no one else may try to
+ BUG_ON(__mem_sharing_unshare_page(d, gfn,
+ MEM_SHARING_DESTROY_GFN));
+ /*
+ * Clear out the p2m entry so no one else may try to
* unshare. Must succeed: we just read the old entry and
- * we hold the p2m lock. */
+ * we hold the p2m lock.
+ */
set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K,
p2m_invalid, p2m_access_rwx, -1);
- ASSERT(set_rc == 0);
+ ASSERT(!set_rc);
count += 0x10;
}
else
@@ -1454,7 +1494,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
{
- grant_ref_t gref = (grant_ref_t)
+ grant_ref_t gref = (grant_ref_t)
(XENMEM_SHARING_OP_FIELD_GET_GREF(
mso.u.share.source_gfn));
rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn,
@@ -1470,7 +1510,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) )
{
- grant_ref_t gref = (grant_ref_t)
+ grant_ref_t gref = (grant_ref_t)
(XENMEM_SHARING_OP_FIELD_GET_GREF(
mso.u.share.client_gfn));
rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn,
@@ -1534,7 +1574,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
sh = mso.u.share.source_handle;
cgfn = mso.u.share.client_gfn;
- rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
+ rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
rcu_unlock_domain(cd);
}
@@ -506,8 +506,10 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
{
ASSERT(p2m_is_hostp2m(p2m));
- /* Try to unshare. If we fail, communicate ENOMEM without
- * sleeping. */
+ /*
+ * Try to unshare. If we fail, communicate ENOMEM without
+ * sleeping.
+ */
if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
@@ -887,15 +889,15 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
&a, 0, NULL, NULL);
if ( p2m_is_shared(ot) )
{
- /* Do an unshare to cleanly take care of all corner
- * cases. */
+ /* Do an unshare to cleanly take care of all corner cases. */
int rc;
rc = mem_sharing_unshare_page(p2m->domain,
gfn_x(gfn_add(gfn, i)), 0);
if ( rc )
{
p2m_unlock(p2m);
- /* NOTE: Should a guest domain bring this upon itself,
+ /*
+ * NOTE: Should a guest domain bring this upon itself,
* there is not a whole lot we can do. We are buried
* deep in locks from most code paths by now. So, fail
* the call and don't try to sleep on a wait queue
@@ -904,8 +906,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
* However, all current (changeset 3432abcf9380) code
* paths avoid this unsavoury situation. For now.
*
- * Foreign domains are okay to place an event as they
- * won't go to sleep. */
+ * Foreign domains are okay to place an event as they
+ * won't go to sleep.
+ */
(void)mem_sharing_notify_enomem(p2m->domain,
gfn_x(gfn_add(gfn, i)), false);
return rc;
@@ -33,12 +33,14 @@
#define MEM_SHARING_AUDIT 0
#endif
-typedef uint64_t shr_handle_t;
+typedef uint64_t shr_handle_t;
typedef struct rmap_hashtab {
struct list_head *bucket;
- /* Overlaps with prev pointer of list_head in union below.
- * Unlike the prev pointer, this can be NULL. */
+ /*
+ * Overlaps with prev pointer of list_head in union below.
+ * Unlike the prev pointer, this can be NULL.
+ */
void *flag;
} rmap_hashtab_t;
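
This flag is the discriminator behind RMAP_USES_HASHTAB() near the top of mem_sharing.c: page_sharing_info keeps the list head and the hash-table descriptor in a union (only the tail of that struct is visible below), so hash_table.flag aliases gfns.prev. A live list head never has a NULL prev, whereas converting to the hash table explicitly stores NULL there. A sketch of the assumed layout and of how the conversion marks the switch (the assignment is inferred from the macro, not shown in these hunks):

    union {
        struct list_head gfns;      /* list form: gfns.prev is never NULL      */
        rmap_hashtab_t hash_table;  /* hash form: flag (aliasing prev) is NULL */
    };

    /* In rmap_list_to_hash_table(), once entries are rehashed into buckets: */
    page->sharing->hash_table.bucket = bucket;
    page->sharing->hash_table.flag = NULL;  /* RMAP_USES_HASHTAB() now true */
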
@@ -57,34 +59,34 @@ struct page_sharing_info
};
};
-#define sharing_supported(_d) \
- (is_hvm_domain(_d) && paging_mode_hap(_d))
-
unsigned int mem_sharing_get_nr_saved_mfns(void);
unsigned int mem_sharing_get_nr_shared_mfns(void);
#define MEM_SHARING_DESTROY_GFN (1<<1)
/* Only fails with -ENOMEM. Enforce it with a BUG_ON wrapper. */
int __mem_sharing_unshare_page(struct domain *d,
- unsigned long gfn,
- uint16_t flags);
-static inline int mem_sharing_unshare_page(struct domain *d,
- unsigned long gfn,
- uint16_t flags)
+ unsigned long gfn,
+ uint16_t flags);
+
+static inline
+int mem_sharing_unshare_page(struct domain *d,
+ unsigned long gfn,
+ uint16_t flags)
{
int rc = __mem_sharing_unshare_page(d, gfn, flags);
BUG_ON( rc && (rc != -ENOMEM) );
return rc;
}
-/* If called by a foreign domain, possible errors are
+/*
+ * If called by a foreign domain, possible errors are
* -EBUSY -> ring full
* -ENOSYS -> no ring to begin with
* and the foreign mapper is responsible for retrying.
*
- * If called by the guest vcpu itself and allow_sleep is set, may
- * sleep on a wait queue, so the caller is responsible for not
- * holding locks on entry. It may only fail with ENOSYS
+ * If called by the guest vcpu itself and allow_sleep is set, may
+ * sleep on a wait queue, so the caller is responsible for not
+ * holding locks on entry. It may only fail with ENOSYS
*
* If called by the guest vcpu itself and allow_sleep is not set,
* then it's the same as a foreign domain.
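
The allow_sleep argument encodes exactly that distinction, so a caller would typically choose it based on whether the guest vcpu is acting on its own domain. A hypothetical caller sketch:

    /* Hypothetical: sleep only when the guest vcpu is acting on itself. */
    bool allow_sleep = (d == current->domain);
    int rc = mem_sharing_notify_enomem(d, gfn, allow_sleep);

    if ( rc == -EBUSY || rc == -ENOSYS )
    {
        /* Foreign mapper (or no ring at all): retrying is up to the caller. */
    }
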
@@ -92,10 +94,11 @@ static inline int mem_sharing_unshare_page(struct domain *d,
int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
bool allow_sleep);
int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg);
-int mem_sharing_domctl(struct domain *d,
+int mem_sharing_domctl(struct domain *d,
struct xen_domctl_mem_sharing_op *mec);
-/* Scans the p2m and relinquishes any shared pages, destroying
+/*
+ * Scans the p2m and relinquishes any shared pages, destroying
* those for which this domain holds the final reference.
* Preemptible.
*/
@@ -107,18 +110,22 @@ static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
{
return 0;
}
+
static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
{
return 0;
}
-static inline int mem_sharing_unshare_page(struct domain *d,
- unsigned long gfn,
- uint16_t flags)
+
+static inline
+int mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
+ uint16_t flags)
{
ASSERT_UNREACHABLE();
return -EOPNOTSUPP;
}
-static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
+
+static inline
+int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
bool allow_sleep)
{
ASSERT_UNREACHABLE();
No functional changes.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
---
 xen/arch/x86/hvm/hvm.c            |  11 +-
 xen/arch/x86/mm/mem_sharing.c     | 342 +++++++++++++++++-------------
 xen/arch/x86/mm/p2m.c             |  17 +-
 xen/include/asm-x86/mem_sharing.h |  49 +++--
 4 files changed, 235 insertions(+), 184 deletions(-)