File valgrind.xen-4.6-versioned-XEN_SYSCTL_numainfo.patch of Package valgrind

From: Olaf Hering <olaf@aepfle.de>
Date: Fri, 23 Nov 2018 22:18:12 +0100
Subject: xen-4.6: versioned XEN_SYSCTL_numainfo

The layout of XEN_SYSCTL_numainfo changed after xen-4.6.
Add the new layout and select the matching one at runtime.

bz#390553
---
 coregrind/m_syswrap/syswrap-xen.c | 13 +++++++++++++
 include/vki/vki-xen-sysctl.h      | 15 +++++++++++++++
 2 files changed, 28 insertions(+)

--- a/coregrind/m_syswrap/syswrap-xen.c
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -786,24 +786,29 @@ PRE(sysctl) {
    case VKI_XEN_SYSCTL_numainfo:
       switch (sysctl->interface_version)
       {
       case 0x00000008:
       case 0x00000009:
       case 0x0000000a:
       case 0x0000000b:
          PRE_XEN_SYSCTL_READ(numainfo_00000008, max_node_index);
          PRE_XEN_SYSCTL_READ(numainfo_00000008, node_to_memsize);
          PRE_XEN_SYSCTL_READ(numainfo_00000008, node_to_memfree);
          PRE_XEN_SYSCTL_READ(numainfo_00000008, node_to_node_distance);
          break;
+      case 0x0000000c:
+         PRE_XEN_SYSCTL_READ(numainfo_0000000c, num_nodes);
+         PRE_XEN_SYSCTL_READ(numainfo_0000000c, meminfo);
+         PRE_XEN_SYSCTL_READ(numainfo_0000000c, distance);
+         break;
       }
       break;
 
    default:
       bad_subop(tid, layout, arrghs, status, flags,
                 "__HYPERVISOR_sysctl", sysctl->cmd);
       break;
    }
 #undef PRE_XEN_SYSCTL_READ
 #undef __PRE_XEN_SYSCTL_READ
 }
 
@@ -2153,24 +2158,32 @@ POST(sysctl)
       case 0x00000009:
       case 0x0000000a:
       case 0x0000000b:
          POST_XEN_SYSCTL_WRITE(numainfo_00000008, max_node_index);
          POST_MEM_WRITE((Addr)sysctl->u.numainfo_00000008.node_to_memsize.p,
                         sizeof(uint64_t) * sysctl->u.numainfo_00000008.max_node_index);
          POST_MEM_WRITE((Addr)sysctl->u.numainfo_00000008.node_to_memfree.p,
                         sizeof(uint64_t) * sysctl->u.numainfo_00000008.max_node_index);
          POST_MEM_WRITE((Addr)sysctl->u.numainfo_00000008.node_to_node_distance.p,
                         sizeof(uint32_t) *
                         (sysctl->u.numainfo_00000008.max_node_index * sysctl->u.numainfo_00000008.max_node_index));
          break;
+      case 0x0000000c:
+         POST_XEN_SYSCTL_WRITE(numainfo_0000000c, num_nodes);
+         POST_MEM_WRITE((Addr)sysctl->u.numainfo_0000000c.meminfo.p,
+                        sizeof(vki_xen_xen_sysctl_meminfo_0000000c_t) * sysctl->u.numainfo_0000000c.num_nodes);
+         POST_MEM_WRITE((Addr)sysctl->u.numainfo_0000000c.distance.p,
+                        sizeof(uint32_t) *
+                        (sysctl->u.numainfo_0000000c.num_nodes * sysctl->u.numainfo_0000000c.num_nodes));
+         break;
       }
       break;
 
    /* No outputs */
    case VKI_XEN_SYSCTL_debug_keys:
        break;
    }
 #undef POST_XEN_SYSCTL_WRITE
 #undef __POST_XEN_SYSCTL_WRITE
 }
 
 POST(domctl){
--- a/include/vki/vki-xen-sysctl.h
+++ b/include/vki/vki-xen-sysctl.h
@@ -129,24 +129,38 @@ struct vki_xen_sysctl_topologyinfo {
     vki_uint32_t max_cpu_index;
     VKI_XEN_GUEST_HANDLE_64(vki_uint32) cpu_to_core;
     VKI_XEN_GUEST_HANDLE_64(vki_uint32) cpu_to_socket;
     VKI_XEN_GUEST_HANDLE_64(vki_uint32) cpu_to_node;
 };
 
 struct vki_xen_sysctl_numainfo_00000008 {
     vki_uint32_t max_node_index;
     VKI_XEN_GUEST_HANDLE_64(vki_uint64) node_to_memsize;
     VKI_XEN_GUEST_HANDLE_64(vki_uint64) node_to_memfree;
     VKI_XEN_GUEST_HANDLE_64(vki_uint32) node_to_node_distance;
 };
+
+struct vki_xen_xen_sysctl_meminfo_0000000c {
+    vki_uint64_t memsize;
+    vki_uint64_t memfree;
+};
+typedef struct vki_xen_xen_sysctl_meminfo_0000000c vki_xen_xen_sysctl_meminfo_0000000c_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_xen_sysctl_meminfo_0000000c_t);
+
+struct vki_xen_sysctl_numainfo_0000000c {
+    vki_uint32_t num_nodes;
+    VKI_XEN_GUEST_HANDLE_64(vki_xen_xen_sysctl_meminfo_0000000c_t) meminfo;
+    VKI_XEN_GUEST_HANDLE_64(vki_uint32) distance;
+};
+
 struct vki_xen_sysctl_physinfo_00000008 {
     vki_uint32_t threads_per_core;
     vki_uint32_t cores_per_socket;
     vki_uint32_t nr_cpus;     /* # CPUs currently online */
     vki_uint32_t max_cpu_id;  /* Largest possible CPU ID on this host */
     vki_uint32_t nr_nodes;    /* # nodes currently online */
     vki_uint32_t max_node_id; /* Largest possible node ID on this host */
     vki_uint32_t cpu_khz;
     vki_xen_uint64_aligned_t total_pages;
     vki_xen_uint64_aligned_t free_pages;
     vki_xen_uint64_aligned_t scrub_pages;
     vki_uint32_t hw_cap[8];
@@ -196,24 +210,25 @@ struct vki_xen_sysctl_sched_id {
 
 struct vki_xen_sysctl {
     vki_uint32_t cmd;
     vki_uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
     union {
         struct vki_xen_sysctl_readconsole       readconsole;
         //struct vki_xen_sysctl_tbuf_op           tbuf_op;
         struct vki_xen_sysctl_physinfo_00000008 physinfo_00000008;
         struct vki_xen_sysctl_physinfo_0000000a physinfo_0000000a;
         struct vki_xen_sysctl_physinfo_00000010 physinfo_00000010;
         struct vki_xen_sysctl_topologyinfo      topologyinfo;
         struct vki_xen_sysctl_numainfo_00000008 numainfo_00000008;
+        struct vki_xen_sysctl_numainfo_0000000c numainfo_0000000c;
         struct vki_xen_sysctl_sched_id          sched_id;
         //struct vki_xen_sysctl_perfc_op          perfc_op;
         /* getdomaininfolist (sysctl) suffix is the getdomaininfo (domctl) suffix */
         struct vki_xen_sysctl_getdomaininfolist_00000007 getdomaininfolist_00000007;
         struct vki_xen_sysctl_getdomaininfolist_00000008 getdomaininfolist_00000008;
         struct vki_xen_sysctl_getdomaininfolist_00000009 getdomaininfolist_00000009;
         struct vki_xen_sysctl_getdomaininfolist_0000000f getdomaininfolist_0000000f;
         struct vki_xen_sysctl_debug_keys        debug_keys;
         //struct vki_xen_sysctl_getcpuinfo        getcpuinfo;
         //struct vki_xen_sysctl_availheap         availheap;
         //struct vki_xen_sysctl_get_pmstat        get_pmstat;
         //struct vki_xen_sysctl_cpu_hotplug       cpu_hotplug;