File xen.sr-batch_pfns.patch of Package xen

From: Olaf Hering <olaf@aepfle.de>
Date: Fri, 23 Oct 2020 11:14:48 +0200
Subject: sr batch_pfns

tools/guest: save: move batch_pfns

The batch_pfns array has a fixed size (MAX_BATCH_SIZE elements) and was
allocated separately with malloc in setup(). Move it into the preallocated
struct xc_sr_save_arrays, removing one allocation/free pair and one
failure path.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/guest/xg_sr_common.h |  2 +-
 tools/libs/guest/xg_sr_save.c   | 25 +++++++++++--------------
 2 files changed, 12 insertions(+), 15 deletions(-)

--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -203,24 +203,25 @@ static inline int update_blob(struct xc_sr_blob *blob,
 
     if ( (ptr = malloc(size)) == NULL )
         return -1;
 
     free(blob->ptr);
     blob->ptr = memcpy(ptr, src, size);
     blob->size = size;
 
     return 0;
 }
 
 struct xc_sr_save_arrays {
+    xen_pfn_t batch_pfns[MAX_BATCH_SIZE];
 };
 
 struct xc_sr_restore_arrays {
 };
 
 struct xc_sr_context
 {
     xc_interface *xch;
     uint32_t domid;
     int fd;
 
     /* Plain VM, or checkpoints over time. */
@@ -240,25 +241,24 @@ struct xc_sr_context
             /* Live migrate vs non live suspend. */
             bool live;
 
             /* Further debugging information in the stream. */
             bool debug;
 
             unsigned long p2m_size;
             size_t pages_sent;
             size_t overhead_sent;
 
             struct precopy_stats stats;
 
-            xen_pfn_t *batch_pfns;
             unsigned int nr_batch_pfns;
             unsigned long *deferred_pages;
             unsigned long nr_deferred_pages;
             xc_hypercall_buffer_t dirty_bitmap_hbuf;
             struct xc_sr_save_arrays *m;
         } save;
 
         struct /* Restore data. */
         {
             struct xc_sr_restore_ops ops;
             struct restore_callbacks *callbacks;
 
--- a/tools/libs/guest/xg_sr_save.c
+++ b/tools/libs/guest/xg_sr_save.c
@@ -68,25 +68,25 @@ static int write_static_data_end_record(struct xc_sr_context *ctx)
 /*
  * Writes a CHECKPOINT record into the stream.
  */
 static int write_checkpoint_record(struct xc_sr_context *ctx)
 {
     struct xc_sr_record checkpoint = { .type = REC_TYPE_CHECKPOINT };
 
     return write_record(ctx, &checkpoint);
 }
 
 /*
  * Writes a batch of memory as a PAGE_DATA record into the stream.  The batch
- * is constructed in ctx->save.batch_pfns.
+ * is constructed in ctx->save.m->batch_pfns.
  *
  * This function:
  * - gets the types for each pfn in the batch.
  * - for each pfn with real data:
  *   - maps and attempts to localise the pages.
  * - construct and writes a PAGE_DATA record into the stream.
  */
 static int write_batch(struct xc_sr_context *ctx)
 {
     xc_interface *xch = ctx->xch;
     xen_pfn_t *mfns = NULL, *types = NULL;
     void *guest_mapping = NULL;
@@ -119,30 +119,30 @@ static int write_batch(struct xc_sr_context *ctx)
     iov = malloc((nr_pfns + 4) * sizeof(*iov));
 
     if ( !mfns || !types || !errors || !guest_data || !local_pages || !iov )
     {
         ERROR("Unable to allocate arrays for a batch of %u pages",
               nr_pfns);
         goto err;
     }
 
     for ( i = 0; i < nr_pfns; ++i )
     {
         types[i] = mfns[i] = ctx->save.ops.pfn_to_gfn(ctx,
-                                                      ctx->save.batch_pfns[i]);
+                                                      ctx->save.m->batch_pfns[i]);
 
         /* Likely a ballooned page. */
         if ( mfns[i] == INVALID_MFN )
         {
-            set_bit(ctx->save.batch_pfns[i], ctx->save.deferred_pages);
+            set_bit(ctx->save.m->batch_pfns[i], ctx->save.deferred_pages);
             ++ctx->save.nr_deferred_pages;
         }
     }
 
     rc = xc_get_pfn_type_batch(xch, ctx->domid, nr_pfns, types);
     if ( rc )
     {
         PERROR("Failed to get types for pfn batch");
         goto err;
     }
     rc = -1;
 
@@ -170,39 +170,39 @@ static int write_batch(struct xc_sr_context *ctx)
             goto err;
         }
         nr_pages_mapped = nr_pages;
 
         for ( i = 0, p = 0; i < nr_pfns; ++i )
         {
             if ( page_type_has_stream_data(types[i]) == false )
                 continue;
 
             if ( errors[p] )
             {
                 ERROR("Mapping of pfn %#"PRIpfn" (mfn %#"PRIpfn") failed %d",
-                      ctx->save.batch_pfns[i], mfns[p], errors[p]);
+                      ctx->save.m->batch_pfns[i], mfns[p], errors[p]);
                 goto err;
             }
 
             orig_page = page = guest_mapping + (p * PAGE_SIZE);
             rc = ctx->save.ops.normalise_page(ctx, types[i], &page);
 
             if ( orig_page != page )
                 local_pages[i] = page;
 
             if ( rc )
             {
                 if ( rc == -1 && errno == EAGAIN )
                 {
-                    set_bit(ctx->save.batch_pfns[i], ctx->save.deferred_pages);
+                    set_bit(ctx->save.m->batch_pfns[i], ctx->save.deferred_pages);
                     ++ctx->save.nr_deferred_pages;
                     types[i] = XEN_DOMCTL_PFINFO_XTAB;
                     --nr_pages;
                 }
                 else
                     goto err;
             }
             else
                 guest_data[i] = page;
 
             rc = -1;
             ++p;
@@ -215,25 +215,25 @@ static int write_batch(struct xc_sr_context *ctx)
         ERROR("Unable to allocate %zu bytes of memory for page data pfn list",
               nr_pfns * sizeof(*rec_pfns));
         goto err;
     }
 
     hdr.count = nr_pfns;
 
     rec.length = sizeof(hdr);
     rec.length += nr_pfns * sizeof(*rec_pfns);
     rec.length += nr_pages * PAGE_SIZE;
 
     for ( i = 0; i < nr_pfns; ++i )
-        rec_pfns[i] = ((uint64_t)(types[i]) << 32) | ctx->save.batch_pfns[i];
+        rec_pfns[i] = ((uint64_t)(types[i]) << 32) | ctx->save.m->batch_pfns[i];
 
     iov[0].iov_base = &rec.type;
     iov[0].iov_len = sizeof(rec.type);
 
     iov[1].iov_base = &rec.length;
     iov[1].iov_len = sizeof(rec.length);
 
     iov[2].iov_base = &hdr;
     iov[2].iov_len = sizeof(hdr);
 
     iov[3].iov_base = rec_pfns;
     iov[3].iov_len = nr_pfns * sizeof(*rec_pfns);
@@ -287,44 +287,44 @@ static int write_batch(struct xc_sr_context *ctx)
  */
 static int flush_batch(struct xc_sr_context *ctx)
 {
     int rc = 0;
 
     if ( ctx->save.nr_batch_pfns == 0 )
         return rc;
 
     rc = write_batch(ctx);
 
     if ( !rc )
     {
-        VALGRIND_MAKE_MEM_UNDEFINED(ctx->save.batch_pfns,
+        VALGRIND_MAKE_MEM_UNDEFINED(ctx->save.m->batch_pfns,
                                     MAX_BATCH_SIZE *
-                                    sizeof(*ctx->save.batch_pfns));
+                                    sizeof(*ctx->save.m->batch_pfns));
     }
 
     return rc;
 }
 
 /*
  * Add a single pfn to the batch, flushing the batch if full.
  */
 static int add_to_batch(struct xc_sr_context *ctx, xen_pfn_t pfn)
 {
     int rc = 0;
 
     if ( ctx->save.nr_batch_pfns == MAX_BATCH_SIZE )
         rc = flush_batch(ctx);
 
     if ( rc == 0 )
-        ctx->save.batch_pfns[ctx->save.nr_batch_pfns++] = pfn;
+        ctx->save.m->batch_pfns[ctx->save.nr_batch_pfns++] = pfn;
 
     return rc;
 }
 
 /*
  * Pause/suspend the domain, and refresh ctx->dominfo if required.
  */
 static int suspend_domain(struct xc_sr_context *ctx)
 {
     xc_interface *xch = ctx->xch;
 
     /* TODO: Properly specify the return value from this callback.  All
@@ -841,32 +841,30 @@ static int setup(struct xc_sr_context *ctx)
 {
     xc_interface *xch = ctx->xch;
     int rc;
     DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
                                     &ctx->save.dirty_bitmap_hbuf);
 
     rc = ctx->save.ops.setup(ctx);
     if ( rc )
         goto err;
 
     dirty_bitmap = xc_hypercall_buffer_alloc_pages(
         xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)));
-    ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE *
-                                  sizeof(*ctx->save.batch_pfns));
     ctx->save.deferred_pages = bitmap_alloc(ctx->save.p2m_size);
     ctx->save.m = malloc(sizeof(*ctx->save.m));
 
-    if ( !ctx->save.m || !ctx->save.batch_pfns || !dirty_bitmap || !ctx->save.deferred_pages )
+    if ( !ctx->save.m || !dirty_bitmap || !ctx->save.deferred_pages )
     {
-        ERROR("Unable to allocate memory for dirty bitmaps, batch pfns and"
+        ERROR("Unable to allocate memory for dirty bitmaps and"
               " deferred pages");
         rc = -1;
         errno = ENOMEM;
         goto err;
     }
 
     rc = 0;
 
  err:
     return rc;
 }
 
@@ -877,25 +875,24 @@ static void cleanup(struct xc_sr_context *ctx)
                                     &ctx->save.dirty_bitmap_hbuf);
 
 
     xc_shadow_control(xch, ctx->domid, XEN_DOMCTL_SHADOW_OP_OFF,
                       NULL, 0, NULL, 0, NULL);
 
     if ( ctx->save.ops.cleanup(ctx) )
         PERROR("Failed to clean up");
 
     xc_hypercall_buffer_free_pages(xch, dirty_bitmap,
                                    NRPAGES(bitmap_size(ctx->save.p2m_size)));
     free(ctx->save.deferred_pages);
-    free(ctx->save.batch_pfns);
     free(ctx->save.m);
 }
 
 /*
  * Save a domain.
  */
 static int save(struct xc_sr_context *ctx, uint16_t guest_type)
 {
     xc_interface *xch = ctx->xch;
     int rc, saved_rc = 0, saved_errno = 0;
 
     IPRINTF("Saving domain %d, type %s",