File xen.sr-restore-populate_pfns-pfns.patch of Package xen

From: Olaf Hering <olaf@aepfle.de>
Date: Fri, 23 Oct 2020 14:58:53 +0200
Subject: sr restore populate_pfns pfns

tools: restore: preallocate populate_pfns pfns array

Remove the repeated allocation from the migration loop. There will never
be more than MAX_BATCH_SIZE pages to process in an incoming batch, so
allocate the space once during setup and release it during cleanup.

Use a pp_ prefix to avoid a name clash with the pfns array used in handle_page_data.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
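Illustrative note (not part of the patch): the change moves a per-call
malloc()/free() pair out of populate_pfns() into the one-time
setup()/cleanup() path, sized for the MAX_BATCH_SIZE worst case. Below is
a minimal standalone sketch of that pattern only; the struct, function
names and the MAX_BATCH_SIZE value are placeholders, not the real
libxenguest definitions.

#include <stdlib.h>

#define MAX_BATCH_SIZE 1024            /* assumed batch limit for this sketch */
typedef unsigned long xen_pfn_t;       /* stand-in for the real typedef */

struct restore_ctx {
    xen_pfn_t *pp_pfns;                /* preallocated scratch array */
};

/* Allocate the scratch space once, before the migration loop runs. */
static int setup(struct restore_ctx *ctx)
{
    ctx->pp_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->pp_pfns));
    return ctx->pp_pfns ? 0 : -1;
}

/* Called once per incoming batch; count is at most MAX_BATCH_SIZE,
 * so the preallocated array is reused instead of re-malloc'd. */
static void record_batch(struct restore_ctx *ctx, const xen_pfn_t *pfns,
                         unsigned int count)
{
    for ( unsigned int i = 0; i < count; ++i )
        ctx->pp_pfns[i] = pfns[i];
}

/* Free the scratch space once, after the whole stream has been processed. */
static void cleanup(struct restore_ctx *ctx)
{
    free(ctx->pp_pfns);
}

int main(void)
{
    struct restore_ctx ctx = { 0 };
    xen_pfn_t batch[3] = { 1, 2, 3 };

    if ( setup(&ctx) )
        return 1;
    record_batch(&ctx, batch, 3);
    cleanup(&ctx);
    return 0;
}
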
 tools/libs/guest/xg_sr_common.h  |  1 +
 tools/libs/guest/xg_sr_restore.c | 14 ++++-----
 2 files changed, 8 insertions(+), 7 deletions(-)

--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -251,24 +251,25 @@ struct xc_sr_context
             unsigned long nr_deferred_pages;
             xc_hypercall_buffer_t dirty_bitmap_hbuf;
         } save;
 
         struct /* Restore data. */
         {
             struct xc_sr_restore_ops ops;
             struct restore_callbacks *callbacks;
             xen_pfn_t *pfns;
             uint32_t *types;
             xen_pfn_t *mfns;
             int *map_errs;
+            xen_pfn_t *pp_pfns;
 
             int send_back_fd;
             unsigned long p2m_size;
             xc_hypercall_buffer_t dirty_bitmap_hbuf;
 
             /* From Image Header. */
             uint32_t format_version;
 
             /* From Domain Header. */
             uint32_t guest_type;
             uint32_t guest_page_size;
 
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -129,76 +129,74 @@ static int pfn_set_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
     return 0;
 }
 
 /*
  * Given a set of pfns, obtain memory from Xen to fill the physmap for the
  * unpopulated subset.  If types is NULL, no page type checking is performed
  * and all unpopulated pfns are populated.
  */
 int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
                   const xen_pfn_t *original_pfns, const uint32_t *types)
 {
     xc_interface *xch = ctx->xch;
-    xen_pfn_t *mfns = malloc(count * sizeof(*mfns)),
-        *pfns = malloc(count * sizeof(*pfns));
+    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
     unsigned int i, nr_pfns = 0;
     int rc = -1;
 
-    if ( !mfns || !pfns )
+    if ( !mfns )
     {
         ERROR("Failed to allocate %zu bytes for populating the physmap",
               2 * count * sizeof(*mfns));
         goto err;
     }
 
     for ( i = 0; i < count; ++i )
     {
         if ( (!types || page_type_to_populate(types[i])) &&
              !pfn_is_populated(ctx, original_pfns[i]) )
         {
             rc = pfn_set_populated(ctx, original_pfns[i]);
             if ( rc )
                 goto err;
-            pfns[nr_pfns] = mfns[nr_pfns] = original_pfns[i];
+            ctx->restore.pp_pfns[nr_pfns] = mfns[nr_pfns] = original_pfns[i];
             ++nr_pfns;
         }
     }
 
     if ( nr_pfns )
     {
         rc = xc_domain_populate_physmap_exact(
             xch, ctx->domid, nr_pfns, 0, 0, mfns);
         if ( rc )
         {
             PERROR("Failed to populate physmap");
             goto err;
         }
 
         for ( i = 0; i < nr_pfns; ++i )
         {
             if ( mfns[i] == INVALID_MFN )
             {
                 ERROR("Populate physmap failed for pfn %u", i);
                 rc = -1;
                 goto err;
             }
 
-            ctx->restore.ops.set_gfn(ctx, pfns[i], mfns[i]);
+            ctx->restore.ops.set_gfn(ctx, ctx->restore.pp_pfns[i], mfns[i]);
         }
     }
 
     rc = 0;
 
  err:
-    free(pfns);
     free(mfns);
 
     return rc;
 }
 
 /*
  * Given a list of pfns, their types, and a block of page data from the
  * stream, populate and record their types, map the relevant subset and copy
  * the data into the guest.
  */
 static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
                              xen_pfn_t *pfns, uint32_t *types, void *page_data)
@@ -685,26 +683,27 @@ static int setup(struct xc_sr_context *ctx)
         ctx->restore.max_populated_pfn + 1);
     if ( !ctx->restore.populated_pfns )
     {
         ERROR("Unable to allocate memory for populated_pfns bitmap");
         rc = -1;
         goto err;
     }
 
     ctx->restore.pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pfns));
     ctx->restore.types = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.types));
     ctx->restore.mfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.mfns));
     ctx->restore.map_errs = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.map_errs));
+    ctx->restore.pp_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pp_pfns));
     if ( !ctx->restore.pfns || !ctx->restore.types || !ctx->restore.mfns ||
-         !ctx->restore.map_errs )
+         !ctx->restore.map_errs || !ctx->restore.pp_pfns )
     {
         ERROR("Unable to allocate memory");
         rc = -1;
         goto err;
     }
 
     ctx->restore.buffered_records = malloc(
         DEFAULT_BUF_RECORDS * sizeof(struct xc_sr_record));
     if ( !ctx->restore.buffered_records )
     {
         ERROR("Unable to allocate memory for buffered records");
         rc = -1;
@@ -723,24 +722,25 @@ static void cleanup(struct xc_sr_context *ctx)
     DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
                                     &ctx->restore.dirty_bitmap_hbuf);
 
     for ( i = 0; i < ctx->restore.buffered_rec_num; i++ )
         free(ctx->restore.buffered_records[i].data);
 
     if ( ctx->stream_type == XC_STREAM_COLO )
         xc_hypercall_buffer_free_pages(
             xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->restore.p2m_size)));
 
     free(ctx->restore.buffered_records);
     free(ctx->restore.populated_pfns);
+    free(ctx->restore.pp_pfns);
     free(ctx->restore.map_errs);
     free(ctx->restore.mfns);
     free(ctx->restore.types);
     free(ctx->restore.pfns);
 
     if ( ctx->restore.ops.cleanup(ctx) )
         PERROR("Failed to clean up");
 }
 
 /*
  * Restore a domain.
  */