File xen.sr-restore-pfns.patch of Package xen
From: Olaf Hering <olaf@aepfle.de>
Date: Fri, 23 Oct 2020 14:39:30 +0200
Subject: sr restore pfns

tools: restore: preallocate pfns array

Remove the repeated allocation from the migration loop. There will never
be more than MAX_BATCH_SIZE pages to process in an incoming batch, so
allocate the space once.
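
In isolation the pattern looks roughly like the sketch below. This is a
simplified illustration, not the actual Xen code: restore_ctx, setup(),
handle_batch() and cleanup() are hypothetical stand-ins, and the
MAX_BATCH_SIZE value is assumed for the example.

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_BATCH_SIZE 1024           /* assumed batch limit */

    typedef uint64_t xen_pfn_t;           /* stand-in for the Xen type */

    struct restore_ctx {
        xen_pfn_t *pfns;                  /* reused by every batch */
    };

    static int setup(struct restore_ctx *ctx)
    {
        /* One allocation, sized for the largest possible batch. */
        ctx->pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->pfns));
        return ctx->pfns ? 0 : -1;
    }

    static int handle_batch(struct restore_ctx *ctx, const uint64_t *in,
                            unsigned int count)
    {
        /* The caller has already verified count <= MAX_BATCH_SIZE, so
         * the preallocated array is always large enough and no
         * per-batch malloc()/free() pair is needed. */
        for ( unsigned int i = 0; i < count; ++i )
            ctx->pfns[i] = in[i];
        return 0;
    }

    static void cleanup(struct restore_ctx *ctx)
    {
        free(ctx->pfns);
        ctx->pfns = NULL;
    }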

Adjust the verification of the page count: a PAGE_DATA record must carry
at least one page, but not more than MAX_BATCH_SIZE.
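
Written as a standalone predicate (again a sketch; the real check is
open-coded in handle_page_data()), the invariant that makes the
fixed-size array safe is:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_BATCH_SIZE 1024           /* assumed batch limit */

    /* A record must carry at least one pfn, and never more than one
     * batch worth, or it could overrun the preallocated array. */
    static bool valid_page_count(uint32_t count)
    {
        return count >= 1 && count <= MAX_BATCH_SIZE;
    }
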
Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/guest/xg_sr_common.h  |  1 +
 tools/libs/guest/xg_sr_restore.c | 23 ++++++---
 2 files changed, 16 insertions(+), 8 deletions(-)

--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -247,24 +247,25 @@ struct xc_sr_context
uint64_t *rec_pfns;
void **guest_data;
unsigned int nr_batch_pfns;
unsigned long *deferred_pages;
unsigned long nr_deferred_pages;
xc_hypercall_buffer_t dirty_bitmap_hbuf;
} save;
struct /* Restore data. */
{
struct xc_sr_restore_ops ops;
struct restore_callbacks *callbacks;
+ xen_pfn_t *pfns;
int send_back_fd;
unsigned long p2m_size;
xc_hypercall_buffer_t dirty_bitmap_hbuf;
/* From Image Header. */
uint32_t format_version;
/* From Domain Header. */
uint32_t guest_type;
uint32_t guest_page_size;
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -305,25 +305,25 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
/*
* Validate a PAGE_DATA record from the stream, and pass the results to
* process_page_data() to actually perform the legwork.
*/
static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
{
xc_interface *xch = ctx->xch;
struct xc_sr_rec_page_data_header *pages = rec->data;
unsigned int i, pages_of_data = 0;
int rc = -1;
- xen_pfn_t *pfns = NULL, pfn;
+ xen_pfn_t pfn;
uint32_t *types = NULL, type;
/*
* v2 compatibility only exists for x86 streams. This is a bit of a
* bodge, but it is less bad than duplicating handle_page_data() between
* different architectures.
*/
#if defined(__i386__) || defined(__x86_64__)
/* v2 compat. Infer the position of STATIC_DATA_END. */
if ( ctx->restore.format_version < 3 && !ctx->restore.seen_static_data_end )
{
rc = handle_static_data_end(ctx);
@@ -340,40 +340,39 @@ static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
ERROR("No STATIC_DATA_END seen");
goto err;
}
#endif
if ( rec->length < sizeof(*pages) )
{
ERROR("PAGE_DATA record truncated: length %u, min %zu",
rec->length, sizeof(*pages));
goto err;
}
- if ( pages->count < 1 )
+ if ( !pages->count || pages->count > MAX_BATCH_SIZE )
{
- ERROR("Expected at least 1 pfn in PAGE_DATA record");
+ ERROR("Unexpected pfn count %u in PAGE_DATA record", pages->count);
goto err;
}
if ( rec->length < sizeof(*pages) + (pages->count * sizeof(uint64_t)) )
{
ERROR("PAGE_DATA record (length %u) too short to contain %u"
" pfns worth of information", rec->length, pages->count);
goto err;
}
- pfns = malloc(pages->count * sizeof(*pfns));
types = malloc(pages->count * sizeof(*types));
- if ( !pfns || !types )
+ if ( !types )
{
ERROR("Unable to allocate enough memory for %u pfns",
pages->count);
goto err;
}
for ( i = 0; i < pages->count; ++i )
{
pfn = pages->pfn[i] & PAGE_DATA_PFN_MASK;
if ( !ctx->restore.ops.pfn_is_valid(ctx, pfn) )
{
ERROR("pfn %#"PRIpfn" (index %u) outside domain maximum", pfn, i);
@@ -384,43 +383,42 @@ static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
if ( !is_known_page_type(type) )
{
ERROR("Unknown type %#"PRIx32" for pfn %#"PRIpfn" (index %u)",
type, pfn, i);
goto err;
}
if ( page_type_has_stream_data(type) )
/* NOTAB and all L1 through L4 tables (including pinned) should
* have a page worth of data in the record. */
pages_of_data++;
- pfns[i] = pfn;
+ ctx->restore.pfns[i] = pfn;
types[i] = type;
}
if ( rec->length != (sizeof(*pages) +
(sizeof(uint64_t) * pages->count) +
(PAGE_SIZE * pages_of_data)) )
{
ERROR("PAGE_DATA record wrong size: length %u, expected "
"%zu + %zu + %lu", rec->length, sizeof(*pages),
(sizeof(uint64_t) * pages->count), (PAGE_SIZE * pages_of_data));
goto err;
}
- rc = process_page_data(ctx, pages->count, pfns, types,
+ rc = process_page_data(ctx, pages->count, ctx->restore.pfns, types,
&pages->pfn[pages->count]);
err:
free(types);
- free(pfns);
return rc;
}
/*
* Send checkpoint dirty pfn list to primary.
*/
static int send_checkpoint_dirty_pfn_list(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
int rc = -1;
unsigned int count, written;
@@ -706,24 +704,32 @@ static int setup(struct xc_sr_context *ctx)
goto err;
ctx->restore.max_populated_pfn = (32 * 1024 / 4) - 1;
ctx->restore.populated_pfns = bitmap_alloc(
ctx->restore.max_populated_pfn + 1);
if ( !ctx->restore.populated_pfns )
{
ERROR("Unable to allocate memory for populated_pfns bitmap");
rc = -1;
goto err;
}
+ ctx->restore.pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pfns));
+ if ( !ctx->restore.pfns )
+ {
+        ERROR("Unable to allocate memory for pfns");
+ rc = -1;
+ goto err;
+ }
+
ctx->restore.buffered_records = malloc(
DEFAULT_BUF_RECORDS * sizeof(struct xc_sr_record));
if ( !ctx->restore.buffered_records )
{
ERROR("Unable to allocate memory for buffered records");
rc = -1;
goto err;
}
ctx->restore.allocated_rec_num = DEFAULT_BUF_RECORDS;
err:
return rc;
@@ -736,24 +742,25 @@ static void cleanup(struct xc_sr_context *ctx)
DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
&ctx->restore.dirty_bitmap_hbuf);
for ( i = 0; i < ctx->restore.buffered_rec_num; i++ )
free(ctx->restore.buffered_records[i].data);
if ( ctx->stream_type == XC_STREAM_COLO )
xc_hypercall_buffer_free_pages(
xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->restore.p2m_size)));
free(ctx->restore.buffered_records);
free(ctx->restore.populated_pfns);
+ free(ctx->restore.pfns);
if ( ctx->restore.ops.cleanup(ctx) )
PERROR("Failed to clean up");
}
/*
* Restore a domain.
*/
static int restore(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
struct xc_sr_record rec;