From: Olaf Hering <olaf@aepfle.de>
Date: Fri, 23 Oct 2020 15:39:59 +0200
Subject: sr save show_transfer_rate

tools: show migration transfer rate in send_dirty_pages

Show how fast domU pages are transferred in each iteration.

The relevant figure is how fast the pfns travel, not how much protocol
overhead exists, so the reported MiB/sec value covers only the pfn data;
the protocol overhead is reported separately as a plain byte count.
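
As an illustration (made-up numbers, not from a measured run), the
reported value follows the expression used in show_transfer_rate():

    MiB/sec = (pages_sent * PAGE_SIZE * 1000) / ms / (1024 * 1024)

so 262144 pages (1 GiB of 4 KiB pages) sent in 8000 ms would be shown
as (262144 * 4096 * 1000) / 8000 / (1024 * 1024) = 128 MiB/sec, with
the record headers and pfn arrays counted separately as overhead bytes.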

Signed-off-by: Olaf Hering <olaf@aepfle.de>

v02:
- rearrange MiB_sec calculation (jgross)
---
 tools/libs/guest/xg_sr_common.h |  2 +
 tools/libs/guest/xg_sr_save.c   | 46 +++++++++
 2 files changed, 48 insertions(+)

--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -229,24 +229,26 @@ struct xc_sr_context
             int recv_fd;
 
             struct xc_sr_save_ops ops;
             struct save_callbacks *callbacks;
 
             /* Live migrate vs non live suspend. */
             bool live;
 
             /* Further debugging information in the stream. */
             bool debug;
 
             unsigned long p2m_size;
+            size_t pages_sent;
+            size_t overhead_sent;
 
             struct precopy_stats stats;
 
             xen_pfn_t *batch_pfns;
             unsigned int nr_batch_pfns;
             unsigned long *deferred_pages;
             unsigned long nr_deferred_pages;
             xc_hypercall_buffer_t dirty_bitmap_hbuf;
         } save;
 
         struct /* Restore data. */
         {
--- a/tools/libs/guest/xg_sr_save.c
+++ b/tools/libs/guest/xg_sr_save.c
@@ -1,14 +1,15 @@
 #include <assert.h>
 #include <arpa/inet.h>
+#include <time.h>
 
 #include "xg_sr_common.h"
 
 /*
  * Writes an Image header and Domain header into the stream.
  */
 static int write_headers(struct xc_sr_context *ctx, uint16_t guest_type)
 {
     xc_interface *xch = ctx->xch;
     int32_t xen_version = xc_version(xch, XENVER_version, NULL);
     struct xc_sr_ihdr ihdr = {
         .marker  = IHDR_MARKER,
@@ -229,24 +230,26 @@ static int write_batch(struct xc_sr_context *ctx)
     iov[0].iov_len = sizeof(rec.type);
 
     iov[1].iov_base = &rec.length;
     iov[1].iov_len = sizeof(rec.length);
 
     iov[2].iov_base = &hdr;
     iov[2].iov_len = sizeof(hdr);
 
     iov[3].iov_base = rec_pfns;
     iov[3].iov_len = nr_pfns * sizeof(*rec_pfns);
 
     iovcnt = 4;
+    ctx->save.pages_sent += nr_pages;
+    ctx->save.overhead_sent += sizeof(rec) + sizeof(hdr) + nr_pfns * sizeof(*rec_pfns);
 
     if ( nr_pages )
     {
         for ( i = 0; i < nr_pfns; ++i )
         {
             if ( guest_data[i] )
             {
                 iov[iovcnt].iov_base = guest_data[i];
                 iov[iovcnt].iov_len = PAGE_SIZE;
                 iovcnt++;
                 --nr_pages;
             }
@@ -347,63 +350,106 @@ static int suspend_domain(struct xc_sr_context *ctx)
     {
         ERROR("Domain has not been suspended: shutdown %d, reason %d",
               ctx->dominfo.flags & XEN_DOMINF_shutdown,
               dominfo_shutdown_reason(&ctx->dominfo));
         return -1;
     }
 
     xc_report_progress_single(xch, "Domain now suspended");
 
     return 0;
 }
 
+static void show_transfer_rate(struct xc_sr_context *ctx, struct timespec *start)
+{
+    xc_interface *xch = ctx->xch;
+    struct timespec end = {}, diff = {};
+    size_t ms, MiB_sec;
+
+    if ( !ctx->save.pages_sent )
+        return;
+
+    if ( clock_gettime(CLOCK_MONOTONIC, &end) )
+        PERROR("clock_gettime");
+
+    if ( (end.tv_nsec - start->tv_nsec) < 0 )
+    {
+        diff.tv_sec = end.tv_sec - start->tv_sec - 1;
+        diff.tv_nsec = end.tv_nsec - start->tv_nsec + (1000U*1000U*1000U);
+    }
+    else
+    {
+        diff.tv_sec = end.tv_sec - start->tv_sec;
+        diff.tv_nsec = end.tv_nsec - start->tv_nsec;
+    }
+
+    ms = (diff.tv_nsec / (1000U*1000U));
+    ms += (diff.tv_sec * 1000U);
+    if ( !ms )
+        ms = 1;
+
+    MiB_sec = (ctx->save.pages_sent * PAGE_SIZE * 1000U) / ms / (1024U*1024U);
+
+    errno = 0;
+    IPRINTF("%s: %zu bytes + %zu pages in %ld.%09ld sec, %zu MiB/sec", __func__,
+            ctx->save.overhead_sent, ctx->save.pages_sent,
+            diff.tv_sec, diff.tv_nsec, MiB_sec);
+}
+
 /*
  * Send a subset of pages in the guests p2m, according to the dirty bitmap.
  * Used for each subsequent iteration of the live migration loop.
  *
  * Bitmap is bounded by p2m_size.
  */
 static int send_dirty_pages(struct xc_sr_context *ctx,
                             unsigned long entries)
 {
     xc_interface *xch = ctx->xch;
     xen_pfn_t p;
     unsigned long written;
     int rc;
+    struct timespec start = {};
     DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
                                     &ctx->save.dirty_bitmap_hbuf);
 
+    ctx->save.pages_sent = 0;
+    ctx->save.overhead_sent = 0;
+    if ( clock_gettime(CLOCK_MONOTONIC, &start) )
+        PERROR("clock_gettime");
+
     for ( p = 0, written = 0; p < ctx->save.p2m_size; ++p )
     {
         if ( !test_bit(p, dirty_bitmap) )
             continue;
 
         rc = add_to_batch(ctx, p);
         if ( rc )
             return rc;
 
         /* Update progress every 4MB worth of memory sent. */
         if ( (written & ((1U << (22 - 12)) - 1)) == 0 )
             xc_report_progress_step(xch, written, entries);
 
         ++written;
     }
 
     rc = flush_batch(ctx);
     if ( rc )
         return rc;
 
     if ( written > entries )
         DPRINTF("Bitmap contained more entries than expected...");
 
+    show_transfer_rate(ctx, &start);
     xc_report_progress_step(xch, entries, entries);
 
     return ctx->save.ops.check_vm_state(ctx);
 }
 
 /*
  * Send all pages in the guests p2m.  Used as the first iteration of the live
  * migration loop, and for a non-live save.
  */
 static int send_all_pages(struct xc_sr_context *ctx)
 {
     DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,