File 56546afb-sched-better-handle-not-inserting-idle-vCPUs.patch of Package xen.6121
References: bsc#991934
# Commit 6b53bb4ab3c9bd5eccde88a5175cf72589ba6d52
# Date 2015-11-24 14:49:47 +0100
# Author Dario Faggioli <dario.faggioli@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
sched: better handle (not) inserting idle vCPUs in runqueues

Idle vCPUs are set to run immediately, as a part of their
own initialization, so we shouldn't even try to put them
in a runqueue. In fact, no scheduler does that, even when
asked to (that is rather explicit in Credit2 and RTDS, a
bit less evident in Credit1).

Let's make things look as follows:
 - in generic code, explicitly avoid even trying to
   insert idle vCPUs in runqueues;
 - in specific schedulers' code, enforce that.

Note that, as csched_vcpu_insert() is no longer being
called during boot (from sched_init_vcpu()), we can
safely avoid saving the flags when taking the runqueue
lock.
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
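
The net effect of the three hunks below can be read as a contract between the
generic scheduler code and the per-scheduler insert hooks. The following is
only a condensed sketch pieced together from those hunks, not the literal
patched code; the identifiers (v, vc, d, SCHED_OP(), DOM2OP()) are the ones
appearing in the diff itself:

    /* Generic code (sched_init_vcpu): idle vCPUs are made runnable on the
     * spot, every other vCPU goes through the scheduler's insert hook. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        v->is_running = 1;
    }
    else
        SCHED_OP(DOM2OP(d), insert_vcpu, v);   /* never reached for idle */

    /* Per-scheduler code (Credit1/Credit2 insert hooks): enforce the
     * contract instead of silently special-casing idle vCPUs. */
    BUG_ON( is_idle_vcpu(vc) );
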
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -911,14 +911,15 @@ csched_vcpu_insert(const struct schedule
 {
     struct csched_vcpu *svc = vc->sched_priv;
     spinlock_t *lock;
-    unsigned long flags;
 
-    lock = vcpu_schedule_lock_irqsave(vc, &flags);
+    BUG_ON( is_idle_vcpu(vc) );
+
+    lock = vcpu_schedule_lock_irq(vc);
 
     if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
         __runq_insert(vc->processor, svc);
 
-    vcpu_schedule_unlock_irqrestore(lock, flags, vc);
+    vcpu_schedule_unlock_irq(lock, vc);
 }
 
 static void
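
For reference, and assuming the usual csched_vcpu_insert(const struct
scheduler *ops, struct vcpu *vc) signature (truncated in the hunk header
above), the Credit1 insert hook should end up looking roughly like this once
the hunk is applied. The switch from the _irqsave/_irqrestore pair to plain
_irq locking is safe precisely because, per the commit message, the hook is
no longer reached from the boot path via sched_init_vcpu():

    static void
    csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
    {
        struct csched_vcpu *svc = vc->sched_priv;
        spinlock_t *lock;

        /* Generic code guarantees we are never handed an idle vCPU. */
        BUG_ON( is_idle_vcpu(vc) );

        lock = vcpu_schedule_lock_irq(vc);

        if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
            __runq_insert(vc->processor, svc);

        vcpu_schedule_unlock_irq(lock, vc);
    }
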
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -873,29 +873,24 @@ csched_vcpu_insert(const struct schedule
     struct csched_vcpu *svc = vc->sched_priv;
     struct domain * const dom = vc->domain;
     struct csched_dom * const sdom = svc->sdom;
+    spinlock_t *lock;
 
     printk("%s: Inserting d%dv%d\n",
            __func__, dom->domain_id, vc->vcpu_id);
 
-    /* NB: On boot, idle vcpus are inserted before alloc_pdata() has
-     * been called for that cpu.
-     */
-    if ( ! is_idle_vcpu(vc) )
-    {
-        spinlock_t *lock;
+    BUG_ON(is_idle_vcpu(vc));
 
-        /* FIXME: Do we need the private lock here? */
-        list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
+    /* FIXME: Do we need the private lock here? */
+    list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
 
-        /* Add vcpu to runqueue of initial processor */
-        lock = vcpu_schedule_lock_irq(vc);
+    /* Add vcpu to runqueue of initial processor */
+    lock = vcpu_schedule_lock_irq(vc);
 
-        runq_assign(ops, vc);
+    runq_assign(ops, vc);
 
-        vcpu_schedule_unlock_irq(lock, vc);
+    vcpu_schedule_unlock_irq(lock, vc);
 
-        sdom->nr_vcpus++;
-    }
+    sdom->nr_vcpus++;
 
     CSCHED_VCPU_CHECK(vc);
 }
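
Similarly, after this hunk the Credit2 insert hook (same function name in
this tree, with the lock variable now declared at function scope) should read
roughly as follows. This is only a reconstruction from the hunk for the
reviewer's convenience, with the signature assumed as above; the diff lines
remain the authoritative change:

    static void
    csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
    {
        struct csched_vcpu *svc = vc->sched_priv;
        struct domain * const dom = vc->domain;
        struct csched_dom * const sdom = svc->sdom;
        spinlock_t *lock;

        printk("%s: Inserting d%dv%d\n",
               __func__, dom->domain_id, vc->vcpu_id);

        /* Idle vCPUs no longer reach this hook at all. */
        BUG_ON(is_idle_vcpu(vc));

        /* FIXME: Do we need the private lock here? */
        list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);

        /* Add vcpu to runqueue of initial processor */
        lock = vcpu_schedule_lock_irq(vc);

        runq_assign(ops, vc);

        vcpu_schedule_unlock_irq(lock, vc);

        sdom->nr_vcpus++;

        CSCHED_VCPU_CHECK(vc);
    }
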
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -206,20 +206,22 @@ int sched_init_vcpu(struct vcpu *v, unsi
     init_timer(&v->poll_timer, poll_timer_fn,
                v, v->processor);
 
-    /* Idle VCPUs are scheduled immediately. */
+    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
+    if ( v->sched_priv == NULL )
+        return 1;
+
+    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
+
+    /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
         per_cpu(schedule_data, v->processor).curr = v;
         v->is_running = 1;
     }
-
-    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
-
-    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
-    if ( v->sched_priv == NULL )
-        return 1;
-
-    SCHED_OP(DOM2OP(d), insert_vcpu, v);
+    else
+    {
+        SCHED_OP(DOM2OP(d), insert_vcpu, v);
+    }
 
     return 0;
 }