# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1233836188 0
# Node ID 1dfcb2444c6e0c0ff0e2809f4ac4ee4adda72fa6
# Parent 8303bd33d0304ed4f4edc94960c874eabad60563
Add a new domctl to get a single record from the HVM save context
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
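
For reference, below is a minimal sketch (not part of the patch) of how a
toolstack caller might use the new XEN_DOMCTL_gethvmcontext_partial domctl to
read a single vcpu's CPU save record. It assumes libxc's internal do_domctl()
hypercall wrapper (xc_private.h) and glosses over locking the destination
buffer in guest-accessible memory, which a real caller must also do; the
helper name get_vcpu_cpu_record is purely illustrative.

/*
 * Illustrative sketch: fetch one vcpu's HVM CPU save record via the new
 * XEN_DOMCTL_gethvmcontext_partial domctl.  Assumes libxc's internal
 * do_domctl() wrapper and omits buffer pinning for brevity.
 */
#include <string.h>
#include <stdint.h>
#include <xenctrl.h>            /* libxc; pulls in xen/domctl.h for tools */
#include <xen/hvm/save.h>       /* HVM_SAVE_CODE(), struct hvm_hw_cpu */

static int get_vcpu_cpu_record(int xc_handle, uint32_t domid,
                               uint16_t vcpu, struct hvm_hw_cpu *cpu)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    memset(cpu, 0, sizeof(*cpu));

    domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = (domid_t)domid;
    domctl.u.hvmcontext_partial.type = HVM_SAVE_CODE(CPU); /* record type */
    domctl.u.hvmcontext_partial.instance = vcpu;            /* which vcpu */
    set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer,
                         (uint8_t *)cpu);

    /* Returns 0 on success; fails with -EINVAL if the requested record
     * type or instance does not exist for this domain. */
    return do_domctl(xc_handle, &domctl);
}

Unlike XEN_DOMCTL_gethvmcontext, which marshals the whole save image, this
call copies out only the payload of the requested record (the save
descriptor header is stripped by hvm_save_one() before the copy_to_guest).
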
Index: xen-3.3.1-testing/xen/arch/x86/domctl.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/domctl.c
+++ xen-3.3.1-testing/xen/arch/x86/domctl.c
@@ -425,6 +425,34 @@ long arch_do_domctl(
}
break;
+ case XEN_DOMCTL_gethvmcontext_partial:
+ {
+ struct domain *d;
+
+ ret = -ESRCH;
+ if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+ break;
+
+ ret = xsm_hvmcontext(d, domctl->cmd);
+ if ( ret )
+ goto gethvmcontext_partial_out;
+
+ ret = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto gethvmcontext_partial_out;
+
+ domain_pause(d);
+ ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
+ domctl->u.hvmcontext_partial.instance,
+ domctl->u.hvmcontext_partial.buffer);
+ domain_unpause(d);
+
+ gethvmcontext_partial_out:
+ rcu_unlock_domain(d);
+ }
+ break;
+
+
case XEN_DOMCTL_set_address_size:
{
struct domain *d;
Index: xen-3.3.1-testing/xen/common/hvm/save.c
===================================================================
--- xen-3.3.1-testing.orig/xen/common/hvm/save.c
+++ xen-3.3.1-testing/xen/common/hvm/save.c
@@ -26,20 +26,21 @@
#include <xen/version.h>
#include <public/version.h>
#include <xen/sched.h>
+#include <xen/guest_access.h>
#include <asm/hvm/support.h>
/* List of handlers for various HVM save and restore types */
-static struct {
+static struct {
hvm_save_handler save;
- hvm_load_handler load;
+ hvm_load_handler load;
const char *name;
size_t size;
int kind;
} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
/* Init-time function to add entries to that list */
-void hvm_register_savevm(uint16_t typecode,
+void hvm_register_savevm(uint16_t typecode,
const char *name,
hvm_save_handler save_state,
hvm_load_handler load_state,
@@ -55,26 +56,73 @@ void hvm_register_savevm(uint16_t typeco
hvm_sr_handlers[typecode].kind = kind;
}
-size_t hvm_save_size(struct domain *d)
+size_t hvm_save_size(struct domain *d)
{
struct vcpu *v;
size_t sz;
int i;
-
+
/* Basic overhead for header and footer */
sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
/* Plus space for each thing we will be saving */
- for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
+ for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
for_each_vcpu(d, v)
sz += hvm_sr_handlers[i].size;
- else
+ else
sz += hvm_sr_handlers[i].size;
return sz;
}
+/* Extract a single instance of a save record, by marshalling all
+ * records of that type and copying out the one we need. */
+int hvm_save_one(struct domain *d, uint16_t typecode, uint16_t instance,
+ XEN_GUEST_HANDLE_64(uint8) handle)
+{
+ int rv = 0;
+ size_t sz = 0;
+ struct vcpu *v;
+ hvm_domain_context_t ctxt = { 0, };
+
+ if ( d->is_dying
+ || typecode > HVM_SAVE_CODE_MAX
+ || hvm_sr_handlers[typecode].size < sizeof(struct hvm_save_descriptor)
+ || hvm_sr_handlers[typecode].save == NULL )
+ return -EINVAL;
+
+ if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+ for_each_vcpu(d, v)
+ sz += hvm_sr_handlers[typecode].size;
+ else
+ sz = hvm_sr_handlers[typecode].size;
+
+ if ( (instance + 1) * hvm_sr_handlers[typecode].size > sz )
+ return -EINVAL;
+
+ ctxt.size = sz;
+ ctxt.data = xmalloc_bytes(sz);
+ if ( !ctxt.data )
+ return -ENOMEM;
+
+ if ( hvm_sr_handlers[typecode].save(d, &ctxt) != 0 )
+ {
+ gdprintk(XENLOG_ERR,
+ "HVM save: failed to save type %"PRIu16"\n", typecode);
+ rv = -EFAULT;
+ }
+ else if ( copy_to_guest(handle,
+ ctxt.data
+ + (instance * hvm_sr_handlers[typecode].size)
+ + sizeof (struct hvm_save_descriptor),
+ hvm_sr_handlers[typecode].size
+ - sizeof (struct hvm_save_descriptor)) )
+ rv = -EFAULT;
+
+ xfree(ctxt.data);
+ return rv;
+}
int hvm_save(struct domain *d, hvm_domain_context_t *h)
{
@@ -94,7 +142,7 @@ int hvm_save(struct domain *d, hvm_domai
c = strrchr(xen_changeset(), ':');
if ( c )
hdr.changeset = simple_strtoll(c, NULL, 16);
- else
+ else
hdr.changeset = -1ULL; /* Unknown */
arch_hvm_save(d, &hdr);
@@ -103,21 +151,21 @@ int hvm_save(struct domain *d, hvm_domai
{
gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
return -EFAULT;
- }
+ }
/* Save all available kinds of state */
- for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
+ for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
{
handler = hvm_sr_handlers[i].save;
- if ( handler != NULL )
+ if ( handler != NULL )
{
gdprintk(XENLOG_INFO, "HVM save: %s\n", hvm_sr_handlers[i].name);
- if ( handler(d, h) != 0 )
+ if ( handler(d, h) != 0 )
{
- gdprintk(XENLOG_ERR,
+ gdprintk(XENLOG_ERR,
"HVM save: failed to save type %"PRIu16"\n", i);
return -EFAULT;
- }
+ }
}
}
@@ -142,12 +190,12 @@ int hvm_load(struct domain *d, hvm_domai
struct hvm_save_descriptor *desc;
hvm_load_handler handler;
struct vcpu *v;
-
+
if ( d->is_dying )
return -EINVAL;
/* Read the save header, which must be first */
- if ( hvm_load_entry(HEADER, h, &hdr) != 0 )
+ if ( hvm_load_entry(HEADER, h, &hdr) != 0 )
return -1;
if ( arch_hvm_load(d, &hdr) )
@@ -155,10 +203,10 @@ int hvm_load(struct domain *d, hvm_domai
c = strrchr(xen_changeset(), ':');
if ( hdr.changeset == -1ULL )
- gdprintk(XENLOG_WARNING,
+ gdprintk(XENLOG_WARNING,
"HVM restore: Xen changeset was not saved.\n");
else if ( c == NULL )
- gdprintk(XENLOG_WARNING,
+ gdprintk(XENLOG_WARNING,
"HVM restore: Xen changeset is not available.\n");
else
{
@@ -169,7 +217,7 @@ int hvm_load(struct domain *d, hvm_domai
}
/* Down all the vcpus: we only re-enable the ones that had state saved. */
- for_each_vcpu(d, v)
+ for_each_vcpu(d, v)
if ( test_and_set_bit(_VPF_down, &v->pause_flags) )
vcpu_sleep_nosync(v);
@@ -178,33 +226,33 @@ int hvm_load(struct domain *d, hvm_domai
if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
{
/* Run out of data */
- gdprintk(XENLOG_ERR,
+ gdprintk(XENLOG_ERR,
"HVM restore: save did not end with a null entry\n");
return -1;
}
-
+
/* Read the typecode of the next entry and check for the end-marker */
desc = (struct hvm_save_descriptor *)(&h->data[h->cur]);
if ( desc->typecode == 0 )
- return 0;
-
+ return 0;
+
/* Find the handler for this entry */
if ( (desc->typecode > HVM_SAVE_CODE_MAX) ||
((handler = hvm_sr_handlers[desc->typecode].load) == NULL) )
{
- gdprintk(XENLOG_ERR,
- "HVM restore: unknown entry typecode %u\n",
+ gdprintk(XENLOG_ERR,
+ "HVM restore: unknown entry typecode %u\n",
desc->typecode);
return -1;
}
/* Load the entry */
- gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",
+ gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",
hvm_sr_handlers[desc->typecode].name, desc->instance);
- if ( handler(d, h) != 0 )
+ if ( handler(d, h) != 0 )
{
- gdprintk(XENLOG_ERR,
- "HVM restore: failed to load entry %u/%u\n",
+ gdprintk(XENLOG_ERR,
+ "HVM restore: failed to load entry %u/%u\n",
desc->typecode, desc->instance);
return -1;
}
Index: xen-3.3.1-testing/xen/include/public/domctl.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/public/domctl.h
+++ xen-3.3.1-testing/xen/include/public/domctl.h
@@ -536,6 +536,17 @@ struct xen_domctl_pin_mem_cacheattr {
typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
+/*
+ * Request a particular record from the HVM context
+ */
+#define XEN_DOMCTL_gethvmcontext_partial 55
+typedef struct xen_domctl_hvmcontext_partial {
+ uint32_t type; /* IN: Type of record required */
+ uint32_t instance; /* IN: Instance of that type */
+ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */
+} xen_domctl_hvmcontext_partial_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t);
+
#define XEN_DOMCTL_set_ext_vcpucontext 42
#define XEN_DOMCTL_get_ext_vcpucontext 43
@@ -646,6 +657,7 @@ struct xen_domctl {
struct xen_domctl_settimeoffset settimeoffset;
struct xen_domctl_real_mode_area real_mode_area;
struct xen_domctl_hvmcontext hvmcontext;
+ struct xen_domctl_hvmcontext_partial hvmcontext_partial;
struct xen_domctl_address_size address_size;
struct xen_domctl_sendtrigger sendtrigger;
struct xen_domctl_get_device_group get_device_group;
Index: xen-3.3.1-testing/xen/include/xen/hvm/save.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/xen/hvm/save.h
+++ xen-3.3.1-testing/xen/include/xen/hvm/save.h
@@ -152,6 +152,8 @@ __initcall(__hvm_register_##_x##_save_an
/* Entry points for saving and restoring HVM domain state */
size_t hvm_save_size(struct domain *d);
int hvm_save(struct domain *d, hvm_domain_context_t *h);
+int hvm_save_one(struct domain *d, uint16_t typecode, uint16_t instance,
+ XEN_GUEST_HANDLE_64(uint8) handle);
int hvm_load(struct domain *d, hvm_domain_context_t *h);
/* Arch-specific definitions. */
Index: xen-3.3.1-testing/xen/xsm/flask/hooks.c
===================================================================
--- xen-3.3.1-testing.orig/xen/xsm/flask/hooks.c
+++ xen-3.3.1-testing/xen/xsm/flask/hooks.c
@@ -814,6 +814,7 @@ static int flask_hvmcontext(struct domai
perm = HVM__SETHVMC;
break;
case XEN_DOMCTL_gethvmcontext:
+ case XEN_DOMCTL_gethvmcontext_partial:
perm = HVM__GETHVMC;
break;
default: