File cpu-pools-libxc.patch of Package xen

Index: xen-4.0.2-testing/tools/libxc/Makefile
===================================================================
--- xen-4.0.2-testing.orig/tools/libxc/Makefile
+++ xen-4.0.2-testing/tools/libxc/Makefile
@@ -8,6 +8,7 @@ CTRL_SRCS-y       :=
 CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y       += xc_cpupool.c
 CTRL_SRCS-y       += xc_domain.c
 CTRL_SRCS-y       += xc_evtchn.c
 CTRL_SRCS-y       += xc_misc.c
Index: xen-4.0.2-testing/tools/libxc/xc_cpupool.c
===================================================================
--- /dev/null
+++ xen-4.0.2-testing/tools/libxc/xc_cpupool.c
@@ -0,0 +1,165 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+static int do_sysctl_save(int xc_handle, struct xen_sysctl *sysctl)
+{
+    int ret;
+
+    do {
+        ret = do_sysctl(xc_handle, sysctl);
+    } while ( (ret < 0) && (errno == EAGAIN) );
+
+    return ret;
+}
+
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_CREATE;
+    sysctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_SYSCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    sysctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_sysctl_save(xc_handle, &sysctl)) != 0 )
+        return err;
+
+    *ppoolid = sysctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_DESTROY;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    uint8_t local[sizeof (info->cpumap)];
+    DECLARE_SYSCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for (p = 0; p < n_max; p++)
+    {
+        sysctl.cmd = XEN_SYSCTL_cpupool_op;
+        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+        sysctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_sysctl_save(xc_handle, &sysctl);
+        unlock_pages(local, sizeof (local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+        info->sched_id = sysctl.u.cpupool_op.sched_id;
+        info->n_dom = sysctl.u.cpupool_op.n_dom;
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_ADDCPU;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_RMCPU;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.domid = domid;
+    return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    uint8_t local[sizeof (*cpumap)];
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_sysctl_save(xc_handle, &sysctl);
+    unlock_pages(local, sizeof (local));
+
+    if (err < 0)
+        return err;
+
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}
Index: xen-4.0.2-testing/tools/libxc/xc_domain.c
===================================================================
--- xen-4.0.2-testing.orig/tools/libxc/xc_domain.c
+++ xen-4.0.2-testing/tools/libxc/xc_domain.c
@@ -220,6 +220,7 @@ int xc_domain_getinfo(int xc_handle,
         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
Index: xen-4.0.2-testing/tools/libxc/xenctrl.h
===================================================================
--- xen-4.0.2-testing.orig/tools/libxc/xenctrl.h
+++ xen-4.0.2-testing/tools/libxc/xenctrl.h
@@ -161,6 +161,7 @@ typedef struct xc_dominfo {
     unsigned int  nr_online_vcpus;
     unsigned int  max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int  cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -492,6 +493,100 @@ int xc_domain_setdebugging(int xc_handle
                            unsigned int enable);
 
 /*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools for which info is returned
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos returned, -1 on failure
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add a cpu to a cpupool. cpu may be -1, indicating the first unassigned cpu.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove a cpu from a cpupool. cpu may be -1, indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
+
+
+/*
  * EVENT CHANNEL FUNCTIONS
  */
 
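A minimal usage sketch of the new interface (illustrative only, not part of the patch): it assumes a libxc handle obtained via xc_interface_open() as elsewhere in Xen 4.0, and the domain id, pool size, and scheduler id used below are placeholders.

#include <stdio.h>
#include "xenctrl.h"

int main(void)
{
    int xc = xc_interface_open();           /* int handle, as used throughout libxc here */
    uint32_t poolid = 0;                    /* 0 asks Xen to pick a free pool id */
    uint64_t free_cpus;
    xc_cpupoolinfo_t info[8];
    int n;

    if ( xc < 0 )
        return 1;

    /* Create a pool; XEN_SCHEDULER_CREDIT is the credit scheduler id from the
     * public domctl interface (any valid sched_id would do). */
    if ( xc_cpupool_create(xc, &poolid, XEN_SCHEDULER_CREDIT) != 0 )
        return 1;

    xc_cpupool_addcpu(xc, poolid, -1);      /* -1: first cpu not assigned to any pool */
    xc_cpupool_movedomain(xc, poolid, 5);   /* domain id 5 is a placeholder */

    n = xc_cpupool_getinfo(xc, 0, 8, info); /* info on up to 8 pools, starting at id 0 */
    if ( n > 0 )
        printf("pool %u runs %u domain(s)\n", info[0].cpupool_id, info[0].n_dom);

    xc_cpupool_freeinfo(xc, &free_cpus);    /* bitmap of cpus not in any pool */

    xc_interface_close(xc);
    return 0;
}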