File xen.sr-restore-mfns.patch of Package xen

From: Olaf Hering <olaf@aepfle.de>
Date: Fri, 23 Oct 2020 14:42:19 +0200
Subject: sr restore mfns

tools: restore: preallocate mfns array

Remove the repeated allocation from the migration loop. There will never
be more than MAX_BATCH_SIZE pages to process in an incoming batch, so
allocate the space once.
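
As a rough, standalone sketch of the pattern (not libxenguest code:
MAX_BATCH_SIZE's value, struct ctx and process_batch() are illustrative
stand-ins), the buffer moves out of the per-batch path and into the
one-time setup()/cleanup() pair:

    #include <stdlib.h>

    #define MAX_BATCH_SIZE 1024        /* illustrative bound on pages per batch */

    struct ctx {
        unsigned long *mfns;           /* preallocated once, reused for every batch */
    };

    static int setup(struct ctx *c)
    {
        c->mfns = malloc(MAX_BATCH_SIZE * sizeof(*c->mfns));
        return c->mfns ? 0 : -1;
    }

    static int process_batch(struct ctx *c, const unsigned long *pfns,
                             unsigned int count)
    {
        /* count is bounded by MAX_BATCH_SIZE, so no per-batch malloc()/free(). */
        for ( unsigned int i = 0; i < count; i++ )
            c->mfns[i] = pfns[i];      /* stand-in for pfn_to_gfn() */
        return 0;
    }

    static void cleanup(struct ctx *c)
    {
        free(c->mfns);
    }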

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/guest/xg_sr_common.h  |  1 +
 tools/libs/guest/xg_sr_restore.c | 16 ++++-----
 2 files changed, 9 insertions(+), 8 deletions(-)

--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -249,24 +249,25 @@ struct xc_sr_context
             unsigned int nr_batch_pfns;
             unsigned long *deferred_pages;
             unsigned long nr_deferred_pages;
             xc_hypercall_buffer_t dirty_bitmap_hbuf;
         } save;
 
         struct /* Restore data. */
         {
             struct xc_sr_restore_ops ops;
             struct restore_callbacks *callbacks;
             xen_pfn_t *pfns;
             uint32_t *types;
+            xen_pfn_t *mfns;
 
             int send_back_fd;
             unsigned long p2m_size;
             xc_hypercall_buffer_t dirty_bitmap_hbuf;
 
             /* From Image Header. */
             uint32_t format_version;
 
             /* From Domain Header. */
             uint32_t guest_type;
             uint32_t guest_page_size;
 
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -195,80 +195,79 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
     return rc;
 }
 
 /*
  * Given a list of pfns, their types, and a block of page data from the
  * stream, populate and record their types, map the relevant subset and copy
  * the data into the guest.
  */
 static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
                              xen_pfn_t *pfns, uint32_t *types, void *page_data)
 {
     xc_interface *xch = ctx->xch;
-    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
     int *map_errs = malloc(count * sizeof(*map_errs));
     int rc;
     void *mapping = NULL, *guest_page = NULL;
     unsigned int i, /* i indexes the pfns from the record. */
         j,          /* j indexes the subset of pfns we decide to map. */
         nr_pages = 0;
 
-    if ( !mfns || !map_errs )
+    if ( !map_errs )
     {
         rc = -1;
         ERROR("Failed to allocate %zu bytes to process page data",
-              count * (sizeof(*mfns) + sizeof(*map_errs)));
+              count * sizeof(*map_errs));
         goto err;
     }
 
     rc = populate_pfns(ctx, count, pfns, types);
     if ( rc )
     {
         ERROR("Failed to populate pfns for batch of %u pages", count);
         goto err;
     }
 
     for ( i = 0; i < count; ++i )
     {
         ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
 
         if ( page_type_has_stream_data(types[i]) )
-            mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);
+            ctx->restore.mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);
     }
 
     /* Nothing to do? */
     if ( nr_pages == 0 )
         goto done;
 
     mapping = guest_page = xenforeignmemory_map(
         xch->fmem, ctx->domid, PROT_READ | PROT_WRITE,
-        nr_pages, mfns, map_errs);
+        nr_pages, ctx->restore.mfns, map_errs);
     if ( !mapping )
     {
         rc = -1;
         PERROR("Unable to map %u mfns for %u pages of data",
                nr_pages, count);
         goto err;
     }
 
     for ( i = 0, j = 0; i < count; ++i )
     {
         if ( !page_type_has_stream_data(types[i]) )
             continue;
 
         if ( map_errs[j] )
         {
             rc = -1;
             ERROR("Mapping pfn %#"PRIpfn" (mfn %#"PRIpfn", type %#"PRIx32") failed with %d",
-                  pfns[i], mfns[j], types[i], map_errs[j]);
+                  pfns[i], ctx->restore.mfns[j], types[i], map_errs[j]);
             goto err;
         }
 
         /* Undo page normalisation done by the saver. */
         rc = ctx->restore.ops.localise_page(ctx, types[i], page_data);
         if ( rc )
         {
             ERROR("Failed to localise pfn %#"PRIpfn" (type %#"PRIx32")",
                   pfns[i], types[i] >> XEN_DOMCTL_PFINFO_LTAB_SHIFT);
             goto err;
         }
 
@@ -289,25 +288,24 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
         guest_page += PAGE_SIZE;
         page_data += PAGE_SIZE;
     }
 
  done:
     rc = 0;
 
  err:
     if ( mapping )
         xenforeignmemory_unmap(xch->fmem, mapping, nr_pages);
 
     free(map_errs);
-    free(mfns);
 
     return rc;
 }
 
 /*
  * Validate a PAGE_DATA record from the stream, and pass the results to
  * process_page_data() to actually perform the legwork.
  */
 static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
 {
     xc_interface *xch = ctx->xch;
     struct xc_sr_rec_page_data_header *pages = rec->data;
@@ -696,25 +694,26 @@ static int setup(struct xc_sr_context *ctx)
     ctx->restore.max_populated_pfn = (32 * 1024 / 4) - 1;
     ctx->restore.populated_pfns = bitmap_alloc(
         ctx->restore.max_populated_pfn + 1);
     if ( !ctx->restore.populated_pfns )
     {
         ERROR("Unable to allocate memory for populated_pfns bitmap");
         rc = -1;
         goto err;
     }
 
     ctx->restore.pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pfns));
     ctx->restore.types = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.types));
-    if ( !ctx->restore.pfns  || !ctx->restore.types )
+    ctx->restore.mfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.mfns));
+    if ( !ctx->restore.pfns || !ctx->restore.types || !ctx->restore.mfns )
     {
         ERROR("Unable to allocate memory");
         rc = -1;
         goto err;
     }
 
     ctx->restore.buffered_records = malloc(
         DEFAULT_BUF_RECORDS * sizeof(struct xc_sr_record));
     if ( !ctx->restore.buffered_records )
     {
         ERROR("Unable to allocate memory for buffered records");
         rc = -1;
@@ -733,24 +732,25 @@ static void cleanup(struct xc_sr_context *ctx)
     DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
                                     &ctx->restore.dirty_bitmap_hbuf);
 
     for ( i = 0; i < ctx->restore.buffered_rec_num; i++ )
         free(ctx->restore.buffered_records[i].data);
 
     if ( ctx->stream_type == XC_STREAM_COLO )
         xc_hypercall_buffer_free_pages(
             xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->restore.p2m_size)));
 
     free(ctx->restore.buffered_records);
     free(ctx->restore.populated_pfns);
+    free(ctx->restore.mfns);
     free(ctx->restore.types);
     free(ctx->restore.pfns);
 
     if ( ctx->restore.ops.cleanup(ctx) )
         PERROR("Failed to clean up");
 }
 
 /*
  * Restore a domain.
  */
 static int restore(struct xc_sr_context *ctx)
 {