--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -249,6 +249,7 @@ struct xc_sr_context
int *errors;
struct iovec *iov;
uint64_t *rec_pfns;
+ void **guest_data;
unsigned int nr_batch_pfns;
unsigned long *deferred_pages;
unsigned long nr_deferred_pages;
--- a/tools/libs/guest/xg_sr_save.c
+++ b/tools/libs/guest/xg_sr_save.c
@@ -89,7 +89,6 @@ static int write_batch(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
void *guest_mapping = NULL;
- void **guest_data = NULL;
void **local_pages = NULL;
int rc = -1;
unsigned int i, p, nr_pages = 0, nr_pages_mapped = 0;
@@ -103,12 +102,10 @@ static int write_batch(struct xc_sr_context *ctx)
assert(nr_pfns != 0);
- /* Pointers to page data to send. Mapped gfns or local allocations. */
- guest_data = calloc(nr_pfns, sizeof(*guest_data));
/* Pointers to locally allocated pages. Need freeing. */
local_pages = calloc(nr_pfns, sizeof(*local_pages));
- if ( !guest_data || !local_pages )
+ if ( !local_pages )
{
ERROR("Unable to allocate arrays for a batch of %u pages",
nr_pfns);
@@ -165,7 +162,10 @@ static int write_batch(struct xc_sr_context *ctx)
for ( i = 0, p = 0; i < nr_pfns; ++i )
{
if ( !page_type_has_stream_data(ctx->save.types[i]) )
+ {
+ ctx->save.guest_data[i] = NULL;
continue;
+ }
if ( ctx->save.errors[p] )
{
@@ -183,6 +183,7 @@ static int write_batch(struct xc_sr_context *ctx)
if ( rc )
{
+ ctx->save.guest_data[i] = NULL;
if ( rc == -1 && errno == EAGAIN )
{
set_bit(ctx->save.batch_pfns[i], ctx->save.deferred_pages);
@@ -194,7 +195,7 @@ static int write_batch(struct xc_sr_context *ctx)
goto err;
}
else
- guest_data[i] = page;
+ ctx->save.guest_data[i] = page;
rc = -1;
++p;
@@ -232,9 +233,9 @@ static int write_batch(struct xc_sr_context *ctx)
{
for ( i = 0; i < nr_pfns; ++i )
{
- if ( guest_data[i] )
+ if ( ctx->save.guest_data[i] )
{
- ctx->save.iov[iovcnt].iov_base = guest_data[i];
+ ctx->save.iov[iovcnt].iov_base = ctx->save.guest_data[i];
ctx->save.iov[iovcnt].iov_len = PAGE_SIZE;
iovcnt++;
--nr_pages;
@@ -258,7 +259,6 @@ static int write_batch(struct xc_sr_context *ctx)
for ( i = 0; local_pages && i < nr_pfns; ++i )
free(local_pages[i]);
free(local_pages);
- free(guest_data);
return rc;
}
@@ -837,11 +837,12 @@ static int setup(struct xc_sr_context *ctx)
ctx->save.errors = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.errors));
ctx->save.iov = malloc((4 + MAX_BATCH_SIZE) * sizeof(*ctx->save.iov));
ctx->save.rec_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.rec_pfns));
+ ctx->save.guest_data = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.guest_data));
ctx->save.deferred_pages = bitmap_alloc(ctx->save.p2m_size);
if ( !ctx->save.batch_pfns || !ctx->save.mfns || !ctx->save.types ||
!ctx->save.errors || !ctx->save.iov || !ctx->save.rec_pfns ||
- !dirty_bitmap || !ctx->save.deferred_pages )
+ !ctx->save.guest_data || !dirty_bitmap || !ctx->save.deferred_pages )
{
ERROR("Unable to allocate memory for dirty bitmaps, batch pfns and"
" deferred pages");
@@ -872,6 +873,7 @@ static void cleanup(struct xc_sr_context *ctx)
xc_hypercall_buffer_free_pages(xch, dirty_bitmap,
NRPAGES(bitmap_size(ctx->save.p2m_size)));
free(ctx->save.deferred_pages);
+ free(ctx->save.guest_data);
free(ctx->save.rec_pfns);
free(ctx->save.iov);
free(ctx->save.errors);
Remove repeated allocation from migration loop. There will never be
more than MAX_BATCH_SIZE pages to process in a batch. Allocate the
space once, during setup.

Because the array was previously zeroed by calloc() on every batch,
adjust the loop to clear unused entries explicitly as needed.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/guest/xg_sr_common.h |  1 +
 tools/libs/guest/xg_sr_save.c   | 20 +++++++++++---------
 2 files changed, 12 insertions(+), 9 deletions(-)
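For context, a minimal standalone sketch of the pattern follows. It is
illustrative only, not the libxenguest code: process_batch(), types and
the main() driver are invented here; only MAX_BATCH_SIZE and guest_data
mirror names from the patch. The array is malloc()ed once up front, so
the per-batch loop must store NULL itself into any slot that the old
per-batch calloc() used to zero implicitly:

#include <stdlib.h>

#define MAX_BATCH_SIZE 4

static void **guest_data;   /* allocated once, reused for every batch */

/* Fill guest_data[0..nr-1]; clear slots that have nothing to send. */
static void process_batch(int *types, unsigned int nr)
{
    unsigned int i;

    for ( i = 0; i < nr; ++i )
    {
        if ( !types[i] )
        {
            /* calloc() used to guarantee NULL here; clear by hand now. */
            guest_data[i] = NULL;
            continue;
        }
        guest_data[i] = &types[i];   /* stand-in for a mapped page */
    }
}

int main(void)
{
    int batch1[] = { 1, 0, 1, 1 }, batch2[] = { 0, 1 };

    guest_data = malloc(MAX_BATCH_SIZE * sizeof(*guest_data));
    if ( !guest_data )
        return 1;

    process_batch(batch1, 4);
    /* Without the NULL store, slot 0 would still hold batch1's pointer. */
    process_batch(batch2, 2);

    free(guest_data);
    return 0;
}

The explicit clearing matters because write_batch() later treats a
non-NULL guest_data[i] as "this page has data to send" when building
the iovec. Without the NULL stores added by this patch, a slot that the
current batch skips (a page without stream data, or one that failed to
map or normalise) would keep whatever pointer an earlier batch left
there, and stale data could be written into the stream.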