@@ -989,12 +989,6 @@ int donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
return -ENOSYS;
}
-int steal_page(
- struct domain *d, struct page_info *page, unsigned int memflags)
-{
- return -1;
-}
-
int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
{
ASSERT(0);
@@ -4253,57 +4253,6 @@ int donate_page(
return -1;
}
-int steal_page(
- struct domain *d, struct page_info *page, unsigned int memflags)
-{
- unsigned long x, y;
- bool_t drop_dom_ref = 0;
- const struct domain *owner = dom_xen;
-
- spin_lock(&d->page_alloc_lock);
-
- if ( is_xen_heap_page(page) || ((owner = page_get_owner(page)) != d) )
- goto fail;
-
- /*
- * We require there is just one reference (PGC_allocated). We temporarily
- * drop this reference now so that we can safely swizzle the owner.
- */
- y = page->count_info;
- do {
- x = y;
- if ( (x & (PGC_count_mask|PGC_allocated)) != (1 | PGC_allocated) )
- goto fail;
- y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
- } while ( y != x );
-
- /* Swizzle the owner then reinstate the PGC_allocated reference. */
- page_set_owner(page, NULL);
- y = page->count_info;
- do {
- x = y;
- BUG_ON((x & (PGC_count_mask|PGC_allocated)) != PGC_allocated);
- } while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
-
- /* Unlink from original owner. */
- if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
- drop_dom_ref = 1;
- page_list_del(page, &d->page_list);
-
- spin_unlock(&d->page_alloc_lock);
- if ( unlikely(drop_dom_ref) )
- put_domain(d);
- return 0;
-
- fail:
- spin_unlock(&d->page_alloc_lock);
- MEM_LOG("Bad page %lx: ed=%d sd=%d caf=%08lx taf=%" PRtype_info,
- page_to_mfn(page), d->domain_id,
- owner ? owner->domain_id : DOMID_INVALID,
- page->count_info, page->u.inuse.type_info);
- return -1;
-}
-
static int __do_update_va_mapping(
unsigned long va, u64 val64, unsigned long flags, struct domain *pg_owner)
{
@@ -1786,6 +1786,57 @@ int assign_pages(
return -1;
}
+int steal_page(
+ struct domain *d, struct page_info *page, unsigned int memflags)
+{
+ unsigned long x, y;
+ bool_t drop_dom_ref = 0;
+ const struct domain *owner = dom_xen;
+
+ spin_lock(&d->page_alloc_lock);
+
+ if ( is_xen_heap_page(page) || ((owner = page_get_owner(page)) != d) )
+ goto fail;
+
+ /*
+ * We require there is just one reference (PGC_allocated). We temporarily
+ * drop this reference now so that we can safely swizzle the owner.
+ */
+ y = page->count_info;
+ do {
+ x = y;
+ if ( (x & (PGC_count_mask|PGC_allocated)) != (1 | PGC_allocated) )
+ goto fail;
+ y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
+ } while ( y != x );
+
+ /* Swizzle the owner then reinstate the PGC_allocated reference. */
+ page_set_owner(page, NULL);
+ y = page->count_info;
+ do {
+ x = y;
+ BUG_ON((x & (PGC_count_mask|PGC_allocated)) != PGC_allocated);
+ } while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
+
+ /* Unlink from original owner. */
+ if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
+ drop_dom_ref = 1;
+ page_list_del(page, &d->page_list);
+
+ spin_unlock(&d->page_alloc_lock);
+ if ( unlikely(drop_dom_ref) )
+ put_domain(d);
+ return 0;
+
+ fail:
+ spin_unlock(&d->page_alloc_lock);
+ gdprintk(XENLOG_WARNING,
+ "Bad page %lx: ed=%d sd=%d caf=%08lx taf=%" PRtype_info,
+ page_to_mfn(page), d->domain_id,
+ owner ? owner->domain_id : DOMID_INVALID,
+ page->count_info, page->u.inuse.type_info);
+ return -1;
+}
struct page_info *alloc_domheap_pages(
struct domain *d, unsigned int order, unsigned int memflags)
@@ -73,6 +73,8 @@ struct page_info
u64 pad;
};
+#define PRtype_info "016lx"/* should only be used for printk's */
+
#define PG_shift(idx) (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
@@ -319,8 +321,6 @@ static inline int relinquish_shared_pages(struct domain *d)
/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-int steal_page(
- struct domain *d, struct page_info *page, unsigned int memflags);
int donate_page(
struct domain *d, struct page_info *page, unsigned int memflags);
@@ -531,8 +531,6 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
-int steal_page(
- struct domain *d, struct page_info *page, unsigned int memflags);
int donate_page(
struct domain *d, struct page_info *page, unsigned int memflags);
@@ -139,6 +139,9 @@ int assign_pages(
unsigned int order,
unsigned int memflags);
+int steal_page(
+ struct domain *d, struct page_info *page, unsigned int memflags);
+
/* Dump info to serial console */
void arch_dump_shared_mem_info(void);
The implementation of steal_page for ARM and x86 would be the same. Rather
than duplicating the x86 function, move it to common code. Also introduce
the missing PRtype_info define for ARM so that the code compiles.

Signed-off-by: Julien Grall <julien.grall@citrix.com>

---
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

Note that this patch *shouldn't* be applied before the rest of the series:
the callers of this function are not yet fully fixed for ARM and could
introduce a security issue. Until now, steal_page always returned an error
on ARM, which kept the code following those calls from ever being executed.

TODO:
    - Check that all the callers of steal_page work correctly on ARM.
---
 xen/arch/arm/mm.c        |  6 ------
 xen/arch/x86/mm.c        | 51 ------------------------------------------------
 xen/common/page_alloc.c  | 51 ++++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/mm.h |  4 ++--
 xen/include/asm-x86/mm.h |  2 --
 xen/include/xen/mm.h     |  3 +++
 6 files changed, 56 insertions(+), 61 deletions(-)
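
For readers unfamiliar with the count_info manipulation in the moved
function, the sketch below illustrates, outside of Xen, the cmpxchg retry
loop that steal_page relies on when it temporarily drops the single
PGC_allocated reference. It is only an illustration of the pattern, not
part of the patch: __sync_val_compare_and_swap is the GCC builtin standing
in for Xen's cmpxchg(), and COUNT_MASK/ALLOCATED are hypothetical stand-ins
for PGC_count_mask/PGC_allocated.

    /*
     * Standalone sketch (not Xen code) of the cmpxchg retry loop used by
     * steal_page() on page->count_info: snapshot the value, verify that
     * exactly one reference plus the "allocated" flag is present, then
     * try to clear the count atomically, retrying if another CPU raced.
     */
    #include <stdio.h>

    #define COUNT_MASK 0x00ffUL   /* stand-in for PGC_count_mask */
    #define ALLOCATED  0x0100UL   /* stand-in for PGC_allocated  */

    static unsigned long count_info = ALLOCATED | 1;  /* one reference held */

    static int drop_single_reference(void)
    {
        unsigned long x, y = count_info;

        do {
            x = y;
            if ( (x & (COUNT_MASK | ALLOCATED)) != (ALLOCATED | 1) )
                return -1;                 /* extra references: bail out */
            /* GCC builtin standing in for Xen's cmpxchg(). */
            y = __sync_val_compare_and_swap(&count_info, x, x & ~COUNT_MASK);
        } while ( y != x );                /* raced: retry with new value */

        return 0;
    }

    int main(void)
    {
        printf("drop: %d, count_info is now %#lx\n",
               drop_single_reference(), count_info);
        return 0;
    }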