
[v20210713,14/31] tools: restore: preallocate pfns array

Message ID 20210713180605.12096-15-olaf@aepfle.de (mailing list archive)
State New, archived
Series: leftover from 2020

Commit Message

Olaf Hering July 13, 2021, 6:05 p.m. UTC
Remove the repeated allocation from the migration loop. There will never be
more than MAX_BATCH_SIZE pages to process in an incoming batch, so
allocate the space once during setup.

Adjust the verification of the page count: it must be at least one page,
but not more than MAX_BATCH_SIZE.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/guest/xg_sr_common.h  |  1 +
 tools/libs/guest/xg_sr_restore.c | 23 +++++++++++++++--------
 2 files changed, 16 insertions(+), 8 deletions(-)
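
For readers less familiar with the restore path, the sketch below isolates the
allocate-once pattern the patch applies: the scratch buffer moves into the
context, is allocated once in setup(), reused by the per-record handler after a
bounds check, and freed in cleanup(). This is a minimal standalone illustration
with hypothetical names (demo_ctx, handle_batch, MAX_BATCH); the real code is in
xg_sr_restore.c as shown in the diff.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define MAX_BATCH 1024          /* stand-in for MAX_BATCH_SIZE */

/* Hypothetical context: the pfn scratch buffer is owned by the
 * context and allocated once, not on every incoming record. */
struct demo_ctx {
    uint64_t *pfns;             /* preallocated, MAX_BATCH entries */
};

static int setup(struct demo_ctx *ctx)
{
    ctx->pfns = malloc(MAX_BATCH * sizeof(*ctx->pfns));
    if ( !ctx->pfns )
    {
        fprintf(stderr, "Unable to allocate memory\n");
        return -1;
    }
    return 0;
}

/* Per-record handler: validate the count against the fixed buffer
 * size and reuse ctx->pfns instead of malloc()/free() per batch. */
static int handle_batch(struct demo_ctx *ctx, const uint64_t *in,
                        unsigned int count)
{
    unsigned int i;

    if ( !count || count > MAX_BATCH )
    {
        fprintf(stderr, "Unexpected pfn count %u\n", count);
        return -1;
    }

    for ( i = 0; i < count; i++ )
        ctx->pfns[i] = in[i];

    /* ... process ctx->pfns[0..count-1] ... */
    return 0;
}

static void cleanup(struct demo_ctx *ctx)
{
    free(ctx->pfns);
    ctx->pfns = NULL;
}

int main(void)
{
    struct demo_ctx ctx = { 0 };
    uint64_t batch[] = { 1, 2, 3 };
    int rc = setup(&ctx);

    if ( !rc )
        rc = handle_batch(&ctx, batch, 3);

    cleanup(&ctx);
    return rc ? EXIT_FAILURE : EXIT_SUCCESS;
}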

Patch

diff --git a/tools/libs/guest/xg_sr_common.h b/tools/libs/guest/xg_sr_common.h
index 34d4c15b22..d32b4c46f3 100644
--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -256,6 +256,7 @@  struct xc_sr_context
         {
             struct xc_sr_restore_ops ops;
             struct restore_callbacks *callbacks;
+            xen_pfn_t *pfns;
 
             int send_back_fd;
             unsigned long p2m_size;
diff --git a/tools/libs/guest/xg_sr_restore.c b/tools/libs/guest/xg_sr_restore.c
index aa4113d7f6..e812f65f99 100644
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -314,7 +314,7 @@  static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
     unsigned int i, pages_of_data = 0;
     int rc = -1;
 
-    xen_pfn_t *pfns = NULL, pfn;
+    xen_pfn_t pfn;
     uint32_t *types = NULL, type;
 
     /*
@@ -349,9 +349,9 @@  static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
         goto err;
     }
 
-    if ( pages->count < 1 )
+    if ( !pages->count || pages->count > MAX_BATCH_SIZE )
     {
-        ERROR("Expected at least 1 pfn in PAGE_DATA record");
+        ERROR("Unexpected pfn count %u in PAGE_DATA record", pages->count);
         goto err;
     }
 
@@ -362,9 +362,8 @@  static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
         goto err;
     }
 
-    pfns = malloc(pages->count * sizeof(*pfns));
     types = malloc(pages->count * sizeof(*types));
-    if ( !pfns || !types )
+    if ( !types )
     {
         ERROR("Unable to allocate enough memory for %u pfns",
               pages->count);
@@ -393,7 +392,7 @@  static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
              * have a page worth of data in the record. */
             pages_of_data++;
 
-        pfns[i] = pfn;
+        ctx->restore.pfns[i] = pfn;
         types[i] = type;
     }
 
@@ -407,11 +406,10 @@  static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
         goto err;
     }
 
-    rc = process_page_data(ctx, pages->count, pfns, types,
+    rc = process_page_data(ctx, pages->count, ctx->restore.pfns, types,
                            &pages->pfn[pages->count]);
  err:
     free(types);
-    free(pfns);
 
     return rc;
 }
@@ -728,6 +726,14 @@  static int setup(struct xc_sr_context *ctx)
         goto err;
     }
 
+    ctx->restore.pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pfns));
+    if ( !ctx->restore.pfns )
+    {
+        ERROR("Unable to allocate memory");
+        rc = -1;
+        goto err;
+    }
+
     ctx->restore.buffered_records = malloc(
         DEFAULT_BUF_RECORDS * sizeof(struct xc_sr_record));
     if ( !ctx->restore.buffered_records )
@@ -758,6 +764,7 @@  static void cleanup(struct xc_sr_context *ctx)
 
     free(ctx->restore.buffered_records);
     free(ctx->restore.populated_pfns);
+    free(ctx->restore.pfns);
 
     if ( ctx->restore.ops.cleanup(ctx) )
         PERROR("Failed to clean up");