@@ -287,6 +287,11 @@ struct xc_sr_context
{
struct /* x86 */
{
+ struct {
+ void *buffer;
+ unsigned int len;
+ } domain_context;
+
struct /* x86 PV guest. */
{
/* 4 or 8; 32 or 64 bit domain */
@@ -314,7 +319,7 @@ struct xc_sr_context
/* The guest pfns containing the p2m leaves */
xen_pfn_t *p2m_pfns;
- /* Read-only mapping of guests shared info page */
+ /* Pointer to shared_info (located in context buffer) */
shared_info_any_t *shinfo;
/* p2m generation count for verifying validity of local p2m. */
@@ -42,6 +42,64 @@ int handle_x86_tsc_info(struct xc_sr_context *ctx, struct xc_sr_record *rec)
return 0;
}
+/*
+ * Fetch the domain context records selected by 'mask' from Xen into a
+ * freshly allocated ctx->x86.domain_context.buffer (len set to the size
+ * returned by Xen).  Fails if a context is already held.
+ * Returns 0 on success, -1 on failure (with no buffer left behind).
+ */
+int x86_get_context(struct xc_sr_context *ctx, uint32_t mask)
+{
+    xc_interface *xch = ctx->xch;
+    int rc;
+
+    if ( ctx->x86.domain_context.buffer )
+    {
+        ERROR("Domain context already present");
+        return -1;
+    }
+
+    /* A NULL buffer queries the size needed for the selected records. */
+    rc = xc_domain_getcontext(xch, ctx->domid, mask, NULL, 0);
+    if ( rc < 0 )
+    {
+        PERROR("Unable to get size of domain context");
+        return -1;
+    }
+
+    ctx->x86.domain_context.buffer = malloc(rc);
+    if ( ctx->x86.domain_context.buffer == NULL )
+    {
+        PERROR("Unable to allocate memory for domain context");
+        return -1;
+    }
+
+    rc = xc_domain_getcontext(xch, ctx->domid, mask,
+                              ctx->x86.domain_context.buffer, rc);
+    if ( rc < 0 )
+    {
+        PERROR("Unable to get domain context");
+        /*
+         * Free and clear the buffer: it holds no valid data, and leaving
+         * it set would make any subsequent x86_get_context() call fail
+         * the "already present" check above.
+         */
+        free(ctx->x86.domain_context.buffer);
+        ctx->x86.domain_context.buffer = NULL;
+        return -1;
+    }
+
+    ctx->x86.domain_context.len = rc;
+
+    return 0;
+}
+
+/*
+ * Push the previously fetched domain context (see x86_get_context())
+ * back to Xen for the record types selected by 'mask'.
+ * Returns 0 on success, -1 if no context is held or the hypercall fails.
+ */
+int x86_set_context(struct xc_sr_context *ctx, uint32_t mask)
+{
+    xc_interface *xch = ctx->xch;
+
+    if ( !ctx->x86.domain_context.buffer )
+    {
+        ERROR("Domain context not present");
+        return -1;
+    }
+
+    return xc_domain_setcontext(xch, ctx->domid, mask,
+                                ctx->x86.domain_context.buffer,
+                                ctx->x86.domain_context.len);
+}
+
+/* Release the domain context buffer and reset the bookkeeping state. */
+void x86_cleanup(struct xc_sr_context *ctx)
+{
+    free(ctx->x86.domain_context.buffer);
+    /*
+     * Clear the pointer and length: a non-NULL buffer is treated as
+     * "context already present" by x86_get_context(), so leaving the
+     * freed pointer in place would both dangle and block any later
+     * fetch (e.g. across checkpoint iterations).
+     */
+    ctx->x86.domain_context.buffer = NULL;
+    ctx->x86.domain_context.len = 0;
+}
+
/*
* Local variables:
* mode: C
@@ -14,6 +14,10 @@ int write_x86_tsc_info(struct xc_sr_context *ctx);
*/
int handle_x86_tsc_info(struct xc_sr_context *ctx, struct xc_sr_record *rec);
+int x86_get_context(struct xc_sr_context *ctx, uint32_t mask);
+int x86_set_context(struct xc_sr_context *ctx, uint32_t mask);
+void x86_cleanup(struct xc_sr_context *ctx);
+
#endif
/*
* Local variables:
@@ -182,6 +182,58 @@ int x86_pv_map_m2p(struct xc_sr_context *ctx)
return rc;
}
+/*
+ * Fetch the SHARED_INFO record via the domain context interface and set
+ * ctx->x86.pv.shinfo to point at the shared_info payload *inside*
+ * ctx->x86.domain_context.buffer (no foreign mapping is made; the
+ * pointer is valid until x86_cleanup() frees the buffer).
+ * Returns 0 on success, -1 on failure.
+ */
+int x86_pv_get_shinfo(struct xc_sr_context *ctx)
+{
+    unsigned int off = 0;
+    struct domain_save_descriptor *desc;
+    int rc;
+
+    rc = x86_get_context(ctx, DOMAIN_SAVE_MASK(SHARED_INFO));
+    if ( rc )
+        return rc;
+
+    /*
+     * Walk the descriptor stream: each entry is a fixed-size descriptor
+     * followed by desc->length bytes of payload, terminated by an END
+     * record.
+     * NOTE(review): the early 'return -1' paths below leave the context
+     * buffer allocated and set, so a later x86_get_context() would fail
+     * its "already present" check — consider cleaning up on error.
+     */
+    do {
+        if ( ctx->x86.domain_context.len - off < sizeof(*desc) )
+        {
+            /* Truncated stream: no room for another descriptor. */
+            return -1;
+        }
+        desc = ctx->x86.domain_context.buffer + off;
+        off += sizeof(*desc);
+
+        switch (desc->typecode)
+        {
+        case DOMAIN_SAVE_CODE(SHARED_INFO):
+        {
+            DOMAIN_SAVE_TYPE(SHARED_INFO) *s;
+
+            if ( ctx->x86.domain_context.len - off < sizeof(*s) )
+                return -1;
+
+            s = ctx->x86.domain_context.buffer + off;
+            /* Alias into the context buffer rather than copying. */
+            ctx->x86.pv.shinfo = (shared_info_any_t *)s->buffer;
+            /* fall through */
+        }
+        default:
+            /* Skip this record's payload to reach the next descriptor. */
+            off += (desc->length);
+            break;
+        }
+    } while ( desc->typecode != DOMAIN_SAVE_CODE(END) );
+
+    /* The stream terminated without a SHARED_INFO record. */
+    if ( !ctx->x86.pv.shinfo )
+        return -1;
+
+    return 0;
+}
+
+/*
+ * Push the (in-place modified) shared_info payload back to Xen.
+ * Requires a prior successful x86_pv_get_shinfo().
+ * Returns 0 on success, -1 on failure.
+ */
+int x86_pv_set_shinfo(struct xc_sr_context *ctx)
+{
+    if ( !ctx->x86.pv.shinfo )
+        return -1;
+
+    /*
+     * shinfo aliases the domain context buffer, so any edits made through
+     * it are already in the buffer that x86_set_context() hands to Xen.
+     * (The original code re-tested shinfo in a ternary here; that branch
+     * was dead after the guard above.)
+     */
+    return x86_set_context(ctx, DOMAIN_SAVE_MASK(SHARED_INFO));
+}
+
/*
* Local variables:
* mode: C
@@ -97,6 +97,9 @@ int x86_pv_domain_info(struct xc_sr_context *ctx);
*/
int x86_pv_map_m2p(struct xc_sr_context *ctx);
+int x86_pv_get_shinfo(struct xc_sr_context *ctx);
+int x86_pv_set_shinfo(struct xc_sr_context *ctx);
+
#endif
/*
* Local variables:
@@ -864,8 +864,7 @@ static int handle_shared_info(struct xc_sr_context *ctx,
{
xc_interface *xch = ctx->xch;
unsigned int i;
- int rc = -1;
- shared_info_any_t *guest_shinfo = NULL;
+ int rc;
const shared_info_any_t *old_shinfo = rec->data;
if ( !ctx->x86.pv.restore.seen_pv_info )
@@ -878,39 +877,30 @@ static int handle_shared_info(struct xc_sr_context *ctx,
{
ERROR("X86_PV_SHARED_INFO record wrong size: length %u"
", expected 4096", rec->length);
- goto err;
+ return -1;
}
- guest_shinfo = xc_map_foreign_range(
- xch, ctx->domid, PAGE_SIZE, PROT_READ | PROT_WRITE,
- ctx->dominfo.shared_info_frame);
- if ( !guest_shinfo )
- {
- PERROR("Failed to map Shared Info at mfn %#lx",
- ctx->dominfo.shared_info_frame);
- goto err;
- }
+ rc = x86_pv_get_shinfo(ctx);
+ if ( rc )
+ return rc;
- MEMCPY_FIELD(guest_shinfo, old_shinfo, vcpu_info, ctx->x86.pv.width);
- MEMCPY_FIELD(guest_shinfo, old_shinfo, arch, ctx->x86.pv.width);
+ MEMCPY_FIELD(ctx->x86.pv.shinfo, old_shinfo, vcpu_info,
+ ctx->x86.pv.width);
+ MEMCPY_FIELD(ctx->x86.pv.shinfo, old_shinfo, arch, ctx->x86.pv.width);
- SET_FIELD(guest_shinfo, arch.pfn_to_mfn_frame_list_list,
+ SET_FIELD(ctx->x86.pv.shinfo, arch.pfn_to_mfn_frame_list_list,
0, ctx->x86.pv.width);
- MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_pending, 0, ctx->x86.pv.width);
+ MEMSET_ARRAY_FIELD(ctx->x86.pv.shinfo, evtchn_pending, 0,
+ ctx->x86.pv.width);
for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
- SET_FIELD(guest_shinfo, vcpu_info[i].evtchn_pending_sel,
+ SET_FIELD(ctx->x86.pv.shinfo, vcpu_info[i].evtchn_pending_sel,
0, ctx->x86.pv.width);
- MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_mask, 0xff, ctx->x86.pv.width);
-
- rc = 0;
+ MEMSET_ARRAY_FIELD(ctx->x86.pv.shinfo, evtchn_mask, 0xff,
+ ctx->x86.pv.width);
- err:
- if ( guest_shinfo )
- munmap(guest_shinfo, PAGE_SIZE);
-
- return rc;
+ return x86_pv_set_shinfo(ctx);
}
/* restore_ops function. */
@@ -9,25 +9,6 @@ static inline bool is_canonical_address(xen_vaddr_t vaddr)
return ((int64_t)vaddr >> 47) == ((int64_t)vaddr >> 63);
}
-/*
- * Maps the guests shared info page.
- */
-static int map_shinfo(struct xc_sr_context *ctx)
-{
- xc_interface *xch = ctx->xch;
-
- ctx->x86.pv.shinfo = xc_map_foreign_range(
- xch, ctx->domid, PAGE_SIZE, PROT_READ, ctx->dominfo.shared_info_frame);
- if ( !ctx->x86.pv.shinfo )
- {
- PERROR("Failed to map shared info frame at mfn %#lx",
- ctx->dominfo.shared_info_frame);
- return -1;
- }
-
- return 0;
-}
-
/*
* Copy a list of mfns from a guest, accounting for differences between guest
* and toolstack width. Can fail if truncation would occur.
@@ -1041,7 +1022,7 @@ static int x86_pv_setup(struct xc_sr_context *ctx)
if ( rc )
return rc;
- rc = map_shinfo(ctx);
+ rc = x86_pv_get_shinfo(ctx);
if ( rc )
return rc;
@@ -1112,12 +1093,11 @@ static int x86_pv_cleanup(struct xc_sr_context *ctx)
if ( ctx->x86.pv.p2m )
munmap(ctx->x86.pv.p2m, ctx->x86.pv.p2m_frames * PAGE_SIZE);
- if ( ctx->x86.pv.shinfo )
- munmap(ctx->x86.pv.shinfo, PAGE_SIZE);
-
if ( ctx->x86.pv.m2p )
munmap(ctx->x86.pv.m2p, ctx->x86.pv.nr_m2p_frames * PAGE_SIZE);
+ x86_cleanup(ctx);
+
return 0;
}
@@ -19,6 +19,7 @@
#include <xen/foreign/x86_32.h>
#include <xen/foreign/x86_64.h>
+#include <xen/save.h>
/*
** We process save/restore/migrate in batches of pages; the below
... in the save/restore code. This patch replaces direct mapping of the shared_info_frame (retrieved using XEN_DOMCTL_getdomaininfo) with save/load of the domain context SHARED_INFO record. No modifications are made to the definition of the migration stream at this point. Subsequent patches will define a record in the libxc domain image format for passing domain context and convert the save/restore code to use that. Signed-off-by: Paul Durrant <paul@xen.org> --- Cc: Ian Jackson <ian.jackson@eu.citrix.com> Cc: Wei Liu <wl@xen.org> --- tools/libxc/xc_sr_common.h | 7 +++- tools/libxc/xc_sr_common_x86.c | 58 ++++++++++++++++++++++++++++++ tools/libxc/xc_sr_common_x86.h | 4 +++ tools/libxc/xc_sr_common_x86_pv.c | 52 +++++++++++++++++++++++++++ tools/libxc/xc_sr_common_x86_pv.h | 3 ++ tools/libxc/xc_sr_restore_x86_pv.c | 40 ++++++++------------- tools/libxc/xc_sr_save_x86_pv.c | 26 ++------------ tools/libxc/xg_save_restore.h | 1 + 8 files changed, 142 insertions(+), 49 deletions(-)