@@ -1213,6 +1213,7 @@ int add_to_physmap(struct domain *sd, un
     }
     else
     {
+#ifdef CONFIG_MEM_PAGING
         /*
         * There is a chance we're plugging a hole where a paged out
         * page was.
@@ -1238,6 +1239,7 @@ int add_to_physmap(struct domain *sd, un
                 put_page(cpage);
             }
         }
+#endif
     }
 
     atomic_inc(&nr_saved_mfns);
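
The two hunks above open and close a single #ifdef CONFIG_MEM_PAGING block, so
the whole "plugging a paged-out hole" path in add_to_physmap() compiles away
when the option is disabled. A minimal, self-contained sketch of the pattern
(the demo_domain/plug_hole names and field layout are illustrative, not Xen's;
build once with and once without -DCONFIG_MEM_PAGING):

    #include <stdio.h>

    struct demo_domain {
        int tot_pages;
    #ifdef CONFIG_MEM_PAGING
        int paged_pages;        /* only exists when paging is compiled in */
    #endif
    };

    static void plug_hole(struct demo_domain *d)
    {
    #ifdef CONFIG_MEM_PAGING
        /* Field and adjustment vanish together, so no reference to a
         * missing struct member can survive in the disabled build. */
        d->paged_pages--;
    #endif
        d->tot_pages++;
    }

    int main(void)
    {
        struct demo_domain d = { 0 };

        plug_hole(&d);
        printf("tot_pages=%d\n", d.tot_pages);
        return 0;
    }
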
@@ -691,11 +691,13 @@ p2m_add_page(struct domain *d, gfn_t gfn
             /* Count how many PoD entries we'll be replacing if successful */
             pod_count++;
         }
+#ifdef CONFIG_MEM_PAGING
         else if ( p2m_is_paging(ot) && (ot != p2m_ram_paging_out) )
         {
             /* We're plugging a hole in the physmap where a paged out page was */
             atomic_dec(&d->paged_pages);
         }
+#endif
     }
 
     /* Then, look for m->p mappings for this range and deal with them */
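
The guarded arm relies on p2m_is_paging() and the p2m_ram_paging_out special
case, both of which only make sense with paging support built in. As a rough
sketch of how such a type-class predicate can be built from a bitmask (the
demo_* enum and mask are illustrative stand-ins, not Xen's actual p2m type
definitions):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum {
        demo_ram_rw,
        demo_ram_paging_out,    /* eviction still in progress */
        demo_ram_paged,         /* fully paged out */
        demo_ram_paging_in,     /* being paged back in */
    } demo_p2m_type_t;

    #define DEMO_MASK(t)       (1u << (t))
    #define DEMO_PAGING_TYPES  (DEMO_MASK(demo_ram_paging_out) | \
                                DEMO_MASK(demo_ram_paged) | \
                                DEMO_MASK(demo_ram_paging_in))

    static bool demo_is_paging(demo_p2m_type_t t)
    {
        return DEMO_PAGING_TYPES & DEMO_MASK(t);
    }

    int main(void)
    {
        /* Mirrors the condition in the hunk above. */
        demo_p2m_type_t ot = demo_ram_paged;

        if ( demo_is_paging(ot) && (ot != demo_ram_paging_out) )
            printf("plugging a hole where a paged-out page was\n");
        return 0;
    }
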
@@ -112,7 +112,9 @@ void getdomaininfo(struct domain *d, str
 #ifdef CONFIG_MEM_SHARING
     info->shr_pages = atomic_read(&d->shr_pages);
 #endif
+#ifdef CONFIG_MEM_PAGING
     info->paged_pages = atomic_read(&d->paged_pages);
+#endif
     info->shared_info_frame =
         gfn_x(mfn_to_gfn(d, _mfn(virt_to_mfn(d->shared_info))));
     BUG_ON(SHARED_M2P(info->shared_info_frame));
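
Note the asymmetry with the shr_pages lines just above: the tool-visible info
structure keeps its paged_pages member unconditionally (the ABI is unchanged),
and only the producer side is compiled out. A sketch of that reporting pattern
(demo_* names are illustrative; the explicit memset stands in for whatever
zeroing the real caller performs, which this sketch assumes):

    #include <stdio.h>
    #include <string.h>

    struct demo_domain {
        int tot_pages;
    #ifdef CONFIG_MEM_PAGING
        int paged_pages;
    #endif
    };

    struct demo_dominfo {
        unsigned int paged_pages;   /* always present: ABI stays fixed */
    };

    static void demo_getdomaininfo(const struct demo_domain *d,
                                   struct demo_dominfo *info)
    {
        memset(info, 0, sizeof(*info));   /* disabled build reports 0 */
    #ifdef CONFIG_MEM_PAGING
        info->paged_pages = d->paged_pages;
    #endif
    }

    int main(void)
    {
        struct demo_domain d = { .tot_pages = 4 };
        struct demo_dominfo info;

        demo_getdomaininfo(&d, &info);
        printf("paged_pages=%u\n", info.paged_pages);
        return 0;
    }
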
@@ -278,14 +278,18 @@ static void dump_domains(unsigned char k
 #ifdef CONFIG_MEM_SHARING
                " shared_pages=%u"
 #endif
+#ifdef CONFIG_MEM_PAGING
                " paged_pages=%u"
+#endif
                " dirty_cpus={%*pbl} max_pages=%u\n",
                domain_tot_pages(d), d->xenheap_pages,
 #ifdef CONFIG_MEM_SHARING
                atomic_read(&d->shr_pages),
 #endif
-               atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
-               d->max_pages);
+#ifdef CONFIG_MEM_PAGING
+               atomic_read(&d->paged_pages),
+#endif
+               CPUMASK_PR(d->dirty_cpumask), d->max_pages);
printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
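
This is the most delicate of the hunks: printk() pairs each %-conversion with
its argument purely by position, so the " paged_pages=%u" fragment and the
atomic_read() argument must sit under the same #ifdef, which in turn forces
the trailing CPUMASK_PR()/max_pages arguments onto an unconditional line of
their own. A compilable sketch of the paired-guard pattern (demo_* names are
illustrative):

    #include <stdio.h>

    struct demo_domain {
        unsigned int tot_pages;
    #ifdef CONFIG_MEM_PAGING
        unsigned int paged_pages;
    #endif
        unsigned int max_pages;
    };

    static void demo_dump(const struct demo_domain *d)
    {
        printf("tot_pages=%u"
    #ifdef CONFIG_MEM_PAGING
               " paged_pages=%u"
    #endif
               " max_pages=%u\n",
               d->tot_pages,
    #ifdef CONFIG_MEM_PAGING
               d->paged_pages,
    #endif
               d->max_pages);
    }

    int main(void)
    {
        struct demo_domain d = { .tot_pages = 8, .max_pages = 16 };

        demo_dump(&d);
        return 0;
    }

Guarding only one side of the pair would shift every later conversion onto
the wrong argument; removing fragment and argument together keeps the
remaining conversions aligned in both configurations.
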
@@ -390,7 +390,9 @@ struct domain
     atomic_t shr_pages;         /* shared pages */
 #endif
 
+#ifdef CONFIG_MEM_PAGING
     atomic_t paged_pages;       /* paged-out pages */
+#endif
 
     /* Scheduling. */
     void            *sched_priv;    /* scheduler-specific data */
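
Compiling the field itself out of struct domain is what forces every earlier
hunk in the patch to guard its use sites individually. An alternative pattern
(sketched below with illustrative demo_* names; it is not what this patch
does) is to wrap the field in inline helpers that degrade to no-ops, so call
sites can stay unconditional:

    #include <stdio.h>

    struct demo_domain {
        int tot_pages;
    #ifdef CONFIG_MEM_PAGING
        int paged_pages;
    #endif
    };

    #ifdef CONFIG_MEM_PAGING
    static inline void demo_paged_dec(struct demo_domain *d)
    {
        d->paged_pages--;
    }
    static inline int demo_paged_count(const struct demo_domain *d)
    {
        return d->paged_pages;
    }
    #else
    static inline void demo_paged_dec(struct demo_domain *d)
    {
        (void)d;                    /* no-op when paging is off */
    }
    static inline int demo_paged_count(const struct demo_domain *d)
    {
        (void)d;
        return 0;
    }
    #endif

    int main(void)
    {
        struct demo_domain d = { .tot_pages = 1 };

        demo_paged_dec(&d);
        printf("paged=%d\n", demo_paged_count(&d));
        return 0;
    }

The guard-each-use-site approach taken by the patch has the advantage that a
forgotten guard fails at compile time instead of silently doing nothing.
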