File valgrind.xen-4.8-handle-all-versioned-domctl-ops.patch of Package valgrind
From: Olaf Hering <olaf@aepfle.de>
Date: Fri, 23 Nov 2018 16:16:12 +0100
Subject: xen-4.8: handle all versioned domctl ops
When support for a new domctl interface version is added, every op that switches on domctl->interface_version must be extended to accept the new version; this adds the 0x0000000c cases that were missed.
bz#390553
---
coregrind/m_syswrap/syswrap-xen.c | 4 ++++
1 file changed, 4 insertions(+)
--- a/coregrind/m_syswrap/syswrap-xen.c
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -979,24 +979,25 @@ PRE(domctl)
PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
(Addr)domctl->u.hvmcontext.buffer.p,
domctl->u.hvmcontext.size);
break;
case VKI_XEN_DOMCTL_gethvmcontext_partial:
switch (domctl->interface_version) {
case 0x00000007:
case 0x00000008:
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
__PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000005, type);
__PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000005, instance);
__PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000005, buffer);
switch (domctl->u.hvmcontext_partial_00000005.type) {
case VKI_HVM_SAVE_CODE(CPU):
if ( domctl->u.hvmcontext_partial_00000005.buffer.p )
PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
(Addr)domctl->u.hvmcontext_partial_00000005.buffer.p,
VKI_HVM_SAVE_LENGTH(CPU));
break;
case VKI_HVM_SAVE_CODE(MTRR):
@@ -1183,24 +1184,25 @@ PRE(domctl)
PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
break;
case VKI_XEN_DOMCTL_settimeoffset:
switch (domctl->interface_version) {
case 0x00000007:
case 0x00000008:
case 0x00000009:
case 0x0000000a:
PRE_XEN_DOMCTL_READ(settimeoffset_00000001, time_offset_seconds);
break;
case 0x0000000b:
+ case 0x0000000c:
PRE_XEN_DOMCTL_READ(settimeoffset_0000000b, time_offset_seconds);
break;
}
break;
case VKI_XEN_DOMCTL_getvcpuinfo:
PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
break;
case VKI_XEN_DOMCTL_scheduler_op:
PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
@@ -2404,24 +2406,25 @@ POST(domctl){
POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
sizeof(*domctl->u.hvmcontext.buffer.p)
* domctl->u.hvmcontext.size);
break;
case VKI_XEN_DOMCTL_gethvmcontext_partial:
switch (domctl->interface_version) {
case 0x00000007:
case 0x00000008:
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
switch (domctl->u.hvmcontext_partial_00000005.type) {
case VKI_HVM_SAVE_CODE(CPU):
if ( domctl->u.hvmcontext_partial_00000005.buffer.p )
POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_00000005.buffer.p, VKI_HVM_SAVE_LENGTH(CPU));
break;
}
break;
}
break;
case VKI_XEN_DOMCTL_scheduler_op:
if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
@@ -2570,24 +2573,25 @@ POST(domctl){
syscall32_disables_events);
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
sysenter_disables_events);
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
mcg_cap);
#endif
break;
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
syscall32_callback_eip);
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
sysenter_callback_eip);
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
syscall32_callback_cs);
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
sysenter_callback_cs);
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
syscall32_disables_events);