@@ -394,7 +394,7 @@ long arch_do_domctl(
page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
if ( unlikely(!page) ||
- unlikely(is_xen_heap_page(page)) )
+ unlikely(is_special_page(page)) )
{
if ( unlikely(p2m_is_broken(t)) )
type = XEN_DOMCTL_PFINFO_BROKEN;
@@ -1014,7 +1014,7 @@ get_page_from_l1e(
unsigned long cacheattr = pte_flags_to_cacheattr(l1f);
int err;
- if ( is_xen_heap_page(page) )
+ if ( is_special_page(page) )
{
if ( write )
put_page_type(page);
@@ -2447,7 +2447,7 @@ static int cleanup_page_mappings(struct page_info *page)
{
page->count_info &= ~PGC_cacheattr_mask;
- BUG_ON(is_xen_heap_page(page));
+ BUG_ON(is_special_page(page));
rc = update_xen_mappings(mfn, 0);
}
@@ -2477,7 +2477,7 @@ static int cleanup_page_mappings(struct page_info *page)
rc = rc2;
}
- if ( likely(!is_xen_heap_page(page)) )
+ if ( likely(!is_special_page(page)) )
{
ASSERT((page->u.inuse.type_info &
(PGT_type_mask | PGT_count_mask)) == PGT_writable_page);
@@ -4216,8 +4216,7 @@ int steal_page(
if ( !(owner = page_get_owner_and_reference(page)) )
goto fail;
- if ( owner != d || is_xen_heap_page(page) ||
- (page->count_info & PGC_extra) )
+ if ( owner != d || is_special_page(page) )
goto fail_put;
/*
@@ -77,7 +77,7 @@ int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn)
* pageable() predicate for this, due to it having the same properties
* that we want.
*/
- if ( !p2m_is_pageable(p2mt) || is_xen_heap_page(pg) )
+ if ( !p2m_is_pageable(p2mt) || is_special_page(pg) )
{
rc = -EINVAL;
goto err;
@@ -840,9 +840,8 @@ static int nominate_page(struct domain *d, gfn_t gfn,
if ( !p2m_is_sharable(p2mt) )
goto out;
- /* Skip xen heap pages */
page = mfn_to_page(mfn);
- if ( !page || is_xen_heap_page(page) )
+ if ( !page || is_special_page(page) )
goto out;
/* Check if there are mem_access/remapped altp2m entries for this page */
@@ -2087,19 +2087,22 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn)
* The qemu helper process has an untyped mapping of this dom's RAM
* and the HVM restore program takes another.
* Also allow one typed refcount for
- * - Xen heap pages, to match share_xen_page_with_guest(),
- * - ioreq server pages, to match prepare_ring_for_helper().
+ * - special pages, which are explicitly referenced and mapped by
+ * Xen.
+ * - ioreq server pages, which may be special pages or normal
+ * guest pages with an extra reference taken by
+ * prepare_ring_for_helper().
*/
if ( !(shadow_mode_external(d)
&& (page->count_info & PGC_count_mask) <= 3
&& ((page->u.inuse.type_info & PGT_count_mask)
- == (is_xen_heap_page(page) ||
+ == (is_special_page(page) ||
(is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )
printk(XENLOG_G_ERR "can't find all mappings of mfn %"PRI_mfn
- " (gfn %"PRI_gfn"): c=%lx t=%lx x=%d i=%d\n",
+ " (gfn %"PRI_gfn"): c=%lx t=%lx s=%d i=%d\n",
mfn_x(gmfn), gfn_x(gfn),
page->count_info, page->u.inuse.type_info,
- !!is_xen_heap_page(page),
+ !!is_special_page(page),
(is_hvm_domain(d) && is_ioreq_server_page(d, page)));
}
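
To make the tolerance being applied here easier to follow, the following is a minimal standalone sketch of the same accounting rule, using a simplified stand-in structure rather than Xen's real struct page_info: at most three outstanding untyped references are tolerated, and exactly one typed reference only when the page is special or is an ioreq server page.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the fields consulted above (not Xen's layout). */
struct demo_page {
    unsigned long untyped_refs;   /* count_info & PGC_count_mask */
    unsigned long typed_refs;     /* type_info  & PGT_count_mask */
    bool special;                 /* is_special_page()           */
    bool ioreq_ring;              /* is_ioreq_server_page()      */
};

/*
 * Same rule as the check above: tolerate up to three untyped references,
 * and exactly one typed reference iff the page is special or is an ioreq
 * server ring page; anything beyond that counts as an unexpected mapping.
 */
static bool remaining_refs_expected(const struct demo_page *pg)
{
    unsigned long allowed_typed = (pg->special || pg->ioreq_ring) ? 1 : 0;

    return pg->untyped_refs <= 3 && pg->typed_refs == allowed_typed;
}

int main(void)
{
    struct demo_page plain   = { .untyped_refs = 2, .typed_refs = 0 };
    struct demo_page special = { .untyped_refs = 3, .typed_refs = 1,
                                 .special = true };
    struct demo_page stray   = { .untyped_refs = 5, .typed_refs = 0 };

    printf("%d %d %d\n", remaining_refs_expected(&plain),
           remaining_refs_expected(&special),
           remaining_refs_expected(&stray));   /* prints: 1 1 0 */
    return 0;
}
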
@@ -559,7 +559,7 @@ _sh_propagate(struct vcpu *v,
* caching attributes in the shadows to match what was asked for.
*/
if ( (level == 1) && is_hvm_domain(d) &&
- !is_xen_heap_mfn(target_mfn) )
+ !is_special_page(mfn_to_page(target_mfn)) )
{
int type;
@@ -189,7 +189,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
if ( !mfn_valid(_mfn(mfn)) )
continue;
- if ( is_page_in_use(page) && !is_xen_heap_page(page) )
+ if ( is_page_in_use(page) && !is_special_page(page) )
{
if ( page->count_info & PGC_page_table )
{
@@ -294,7 +294,7 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
+ 3 * PAGE_SIZE)) )
continue; /* skip tboot and its page tables */
- if ( is_page_in_use(page) && is_xen_heap_page(page) )
+ if ( is_page_in_use(page) && is_special_page(page) )
{
void *pg;
@@ -285,6 +285,11 @@ extern struct domain *dom_cow;
#include <asm/mm.h>
+static inline bool is_special_page(const struct page_info *page)
+{
+ return is_xen_heap_page(page) || (page->count_info & PGC_extra);
+}
+
#ifndef page_list_entry
struct page_list_head
{
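
As a closing illustration, here is a self-contained sketch of what the new predicate collapses into a single test. The PGC_* values and the is_xen_heap_page() body below are placeholders chosen for the example, not Xen's real flag assignments.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values; Xen's actual PGC_* bit assignments differ. */
#define PGC_xen_heap  (1UL << 0)
#define PGC_extra     (1UL << 1)

struct page_info {
    unsigned long count_info;
};

static inline bool is_xen_heap_page(const struct page_info *page)
{
    return page->count_info & PGC_xen_heap;
}

/* The new predicate: xenheap pages and PGC_extra pages are both "special". */
static inline bool is_special_page(const struct page_info *page)
{
    return is_xen_heap_page(page) || (page->count_info & PGC_extra);
}

int main(void)
{
    struct page_info heap  = { .count_info = PGC_xen_heap };
    struct page_info extra = { .count_info = PGC_extra };
    struct page_info plain = { .count_info = 0 };

    printf("%d %d %d\n", is_special_page(&heap),
           is_special_page(&extra),
           is_special_page(&plain));   /* prints: 1 1 0 */
    return 0;
}
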