File 19276-cpu-selection-allocation-fix.patch of Package xen
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1237301132 0
# Node ID f4d64af7ee1b756da1987b50c2c76113ee8ddc13
# Parent 043c39eabde1a2118604bcf80574ccc717ff768d
Fix cpu selection at the time of vCPU allocation
After cpu_[online/offline], the set bits in cpu_online_map may not be
contiguous. Use cycle_cpu() to pick the next online cpu.
Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
xen-unstable changeset: 19276:a44751edcb7637103258d043e92490d561aec186
xen-unstable date: Fri Mar 06 18:54:09 2009 +0000
Index: xen-3.3.1-testing/xen/common/domctl.c
===================================================================
--- xen-3.3.1-testing.orig/xen/common/domctl.c
+++ xen-3.3.1-testing/xen/common/domctl.c
@@ -430,7 +430,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
cpu = (i == 0) ?
default_vcpu0_location() :
- (d->vcpu[i-1]->processor + 1) % num_online_cpus();
+ cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map);
if ( alloc_vcpu(d, i, cpu) == NULL )
goto maxvcpu_out;
Index: xen-3.3.1-testing/xen/common/sched_credit.c
===================================================================
--- xen-3.3.1-testing.orig/xen/common/sched_credit.c
+++ xen-3.3.1-testing/xen/common/sched_credit.c
@@ -250,15 +250,6 @@ static struct csched_private csched_priv
static void csched_tick(void *_cpu);
static inline int
-__cycle_cpu(int cpu, const cpumask_t *mask)
-{
- int nxt = next_cpu(cpu, *mask);
- if (nxt == NR_CPUS)
- nxt = first_cpu(*mask);
- return nxt;
-}
-
-static inline int
__vcpu_on_runq(struct csched_vcpu *svc)
{
return !list_empty(&svc->runq_elem);
@@ -428,7 +419,7 @@ csched_cpu_pick(struct vcpu *vc)
cpus_and(cpus, cpu_online_map, vc->cpu_affinity);
cpu = cpu_isset(vc->processor, cpus)
? vc->processor
- : __cycle_cpu(vc->processor, &cpus);
+ : cycle_cpu(vc->processor, cpus);
ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) );
/*
@@ -454,7 +445,7 @@ csched_cpu_pick(struct vcpu *vc)
cpumask_t nxt_idlers;
int nxt;
- nxt = __cycle_cpu(cpu, &cpus);
+ nxt = cycle_cpu(cpu, cpus);
if ( cpu_isset(cpu, cpu_core_map[nxt]) )
{
@@ -1128,7 +1119,7 @@ csched_load_balance(int cpu, struct csch
while ( !cpus_empty(workers) )
{
- peer_cpu = __cycle_cpu(peer_cpu, &workers);
+ peer_cpu = cycle_cpu(peer_cpu, workers);
cpu_clear(peer_cpu, workers);
/*
Index: xen-3.3.1-testing/xen/include/xen/cpumask.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/xen/cpumask.h
+++ xen-3.3.1-testing/xen/include/xen/cpumask.h
@@ -38,6 +38,8 @@
*
* int first_cpu(mask) Number lowest set bit, or NR_CPUS
* int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
+ * int last_cpu(mask) Number highest set bit, or NR_CPUS
+ * int cycle_cpu(cpu, mask) Next cpu cycling from 'cpu', or NR_CPUS
*
* cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
* CPU_MASK_ALL Initializer - all bits set
@@ -225,12 +227,23 @@ static inline int __next_cpu(int n, cons
#define last_cpu(src) __last_cpu(&(src), NR_CPUS)
static inline int __last_cpu(const cpumask_t *srcp, int nbits)
{
- int cpu, pcpu = NR_CPUS;
- for (cpu = first_cpu(*srcp); cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp))
+ int cpu, pcpu = nbits;
+ for (cpu = __first_cpu(srcp, nbits);
+ cpu < nbits;
+ cpu = __next_cpu(cpu, srcp, nbits))
pcpu = cpu;
return pcpu;
}
+#define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS)
+static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
+{
+ int nxt = __next_cpu(n, srcp, nbits);
+ if (nxt == nbits)
+ nxt = __first_cpu(srcp, nbits);
+ return nxt;
+}
+
#define cpumask_of_cpu(cpu) \
({ \
typeof(_unused_cpumask_arg_) m; \