
[1/6] xen: credit1: simplify csched_runq_steal() a little bit.

Message ID 148845108437.23452.18282287504552403796.stgit@Solace.fritz.box (mailing list archive)
State New, archived

Commit Message

Dario Faggioli March 2, 2017, 10:38 a.m. UTC
Since we're holding the lock on the pCPU from which we
are trying to steal, it can't have disappeared, so we
can drop the check for that (and convert it into an
ASSERT()).

And since we only try to steal from busy pCPUs, it's
unlikely for such a pCPU to be idle, so we mark the
check as unlikely() (and bail early if the pCPU
unfortunately is idle).

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/common/sched_credit.c |   87 +++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 43 deletions(-)

Comments

Anshul Makkar March 3, 2017, 9:35 a.m. UTC | #1
On 02/03/17 10:38, Dario Faggioli wrote:
> Since we're holding the lock on the pCPU from which we
> are trying to steal, it can't have disappeared, so we
> can drop the check for that (and convert it into an
> ASSERT()).
>
> And since we only try to steal from busy pCPUs, it's
> unlikely for such a pCPU to be idle, so we mark the
> check as unlikely() (and bail early if the pCPU
> unfortunately is idle).
>
> Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
> ---
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> ---
>   xen/common/sched_credit.c |   87 +++++++++++++++++++++++----------------------
>   1 file changed, 44 insertions(+), 43 deletions(-)
>
> diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
> index 4649e64..63a8675 100644
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -1593,64 +1593,65 @@ static struct csched_vcpu *
>   csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
>   {
>       const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
> -    const struct vcpu * const peer_vcpu = curr_on_cpu(peer_cpu);
>       struct csched_vcpu *speer;
>       struct list_head *iter;
>       struct vcpu *vc;
>   
> +    ASSERT(peer_pcpu != NULL);
> +
>       /*
>        * Don't steal from an idle CPU's runq because it's about to
>        * pick up work from it itself.
>        */
> -    if ( peer_pcpu != NULL && !is_idle_vcpu(peer_vcpu) )
> +    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
> +        goto out;
We can just use if ( !is_idle_vcpu(peer_vcpu) ). Why replace it with
code that introduces an unnecessary branch?
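
I.e. something along these lines (just a sketch of the shape I mean,
written against this very hunk, not compiled):

    /* Keep the positive test; the scan just stays one indentation
     * level deeper, and no label is needed. */
    if ( !is_idle_vcpu(curr_on_cpu(peer_cpu)) )
    {
        list_for_each( iter, &peer_pcpu->runq )
        {
            /* ... the body of the scan, exactly as in this patch ... */
        }
    }
    SCHED_STAT_CRANK(steal_peer_idle);
    return NULL;
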
> +
> +    list_for_each( iter, &peer_pcpu->runq )
>       {
> -        list_for_each( iter, &peer_pcpu->runq )
> -        {
> -            speer = __runq_elem(iter);
> +        speer = __runq_elem(iter);
>   
> -            /*
> -             * If next available VCPU here is not of strictly higher
> -             * priority than ours, this PCPU is useless to us.
> -             */
> -            if ( speer->pri <= pri )
> -                break;
> +        /*
> +         * If next available VCPU here is not of strictly higher
> +         * priority than ours, this PCPU is useless to us.
> +         */
> +        if ( speer->pri <= pri )
> +            break;
>   
> -            /* Is this VCPU runnable on our PCPU? */
> -            vc = speer->vcpu;
> -            BUG_ON( is_idle_vcpu(vc) );
> +        /* Is this VCPU runnable on our PCPU? */
> +        vc = speer->vcpu;
> +        BUG_ON( is_idle_vcpu(vc) );
>   
> -            /*
> -             * If the vcpu has no useful soft affinity, skip this vcpu.
> -             * In fact, what we want is to check if we have any "soft-affine
> -             * work" to steal, before starting to look at "hard-affine work".
> -             *
> -             * Notice that, if not even one vCPU on this runq has a useful
> -             * soft affinity, we could have avoid considering this runq for
> -             * a soft balancing step in the first place. This, for instance,
> -             * can be implemented by taking note of on what runq there are
> -             * vCPUs with useful soft affinities in some sort of bitmap
> -             * or counter.
> -             */
> -            if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
> -                 && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
> -                continue;
> +        /*
> +         * If the vcpu has no useful soft affinity, skip this vcpu.
> +         * In fact, what we want is to check if we have any "soft-affine
> +         * work" to steal, before starting to look at "hard-affine work".
> +         *
> +         * Notice that, if not even one vCPU on this runq has a useful
> +         * soft affinity, we could have avoid considering this runq for
> +         * a soft balancing step in the first place. This, for instance,
> +         * can be implemented by taking note of on what runq there are
> +         * vCPUs with useful soft affinities in some sort of bitmap
> +         * or counter.
> +         */
Wouldn't it be a better approach, now that we have come across a vcpu
which doesn't have the desired soft affinity but is still a potential
candidate for migration, to save the information about such vcpus in
some ordered data structure, instead of just forgetting them? That way
we don't have to scan them again if we don't find anything useful in
the present run.
> +        if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
> +             && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
> +            continue;
>   
> -            csched_balance_cpumask(vc, balance_step, cpumask_scratch);
> -            if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
> -            {
> -                /* We got a candidate. Grab it! */
> -                TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
> -                         vc->domain->domain_id, vc->vcpu_id);
> -                SCHED_VCPU_STAT_CRANK(speer, migrate_q);
> -                SCHED_STAT_CRANK(migrate_queued);
> -                WARN_ON(vc->is_urgent);
> -                __runq_remove(speer);
> -                vc->processor = cpu;
> -                return speer;
> -            }
> +        csched_balance_cpumask(vc, balance_step, cpumask_scratch);
> +        if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
> +        {
> +            /* We got a candidate. Grab it! */
> +            TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
> +                     vc->domain->domain_id, vc->vcpu_id);
> +            SCHED_VCPU_STAT_CRANK(speer, migrate_q);
> +            SCHED_STAT_CRANK(migrate_queued);
> +            WARN_ON(vc->is_urgent);
> +            __runq_remove(speer);
> +            vc->processor = cpu;
> +            return speer;
>           }
>       }
> -
> + out:
>       SCHED_STAT_CRANK(steal_peer_idle);
>       return NULL;
>   }
Anshul
Dario Faggioli March 3, 2017, 1:39 p.m. UTC | #2
On Fri, 2017-03-03 at 09:35 +0000, anshul makkar wrote:
> On 02/03/17 10:38, Dario Faggioli wrote:
> > --- a/xen/common/sched_credit.c
> > +++ b/xen/common/sched_credit.c
> > @@ -1593,64 +1593,65 @@ static struct csched_vcpu *
> >   csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
> >   {
> >       const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
> > -    const struct vcpu * const peer_vcpu = curr_on_cpu(peer_cpu);
> >       struct csched_vcpu *speer;
> >       struct list_head *iter;
> >       struct vcpu *vc;
> >   
> > +    ASSERT(peer_pcpu != NULL);
> > +
> >       /*
> >        * Don't steal from an idle CPU's runq because it's about to
> >        * pick up work from it itself.
> >        */
> > -    if ( peer_pcpu != NULL && !is_idle_vcpu(peer_vcpu) )
> > +    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
> > +        goto out;
> We can just use if ( !is_idle_vcpu(peer_vcpu) ). Why replace it with
> code that introduces an unnecessary branch?
>
Mmm... I don't think I understand what this means.

> > +        /*
> > +         * If the vcpu has no useful soft affinity, skip this vcpu.
> > +         * In fact, what we want is to check if we have any "soft-affine
> > +         * work" to steal, before starting to look at "hard-affine work".
> > +         *
> > +         * Notice that, if not even one vCPU on this runq has a useful
> > +         * soft affinity, we could have avoid considering this runq for
> > +         * a soft balancing step in the first place. This, for instance,
> > +         * can be implemented by taking note of on what runq there are
> > +         * vCPUs with useful soft affinities in some sort of bitmap
> > +         * or counter.
> > +         */
>
> Wouldn't it be a better approach, now that we have come across a vcpu
> which doesn't have the desired soft affinity but is still a potential
> candidate for migration, to save the information about such vcpus in
> some ordered data structure, instead of just forgetting them? That way
> we don't have to scan them again if we don't find anything useful in
> the present run.
>
So, AFAIUI, you're suggesting something like this:
 1. for each vcpu in the runqueue, we check soft-affinity. If it 
    matches, we're done;
 2. if it does not match, we check hard-affinity. If it matches, we 
    save that vcpu somewhere. We only need to save one vcpu, the first 
    one that we find to have matching hard-affinity;
 3. if we don't find any vcpu with matching soft affinity, we steal 
    the one we've saved.

Is this correct? If yes, and if there's nothing I'm overlooking, I
think it could work.
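
Something like this, perhaps (a rough sketch only, written on top of
this very patch, not compiled, names made up of course; I'm assuming
CSCHED_BALANCE_HARD_AFFINITY as the hard counterpart of
CSCHED_BALANCE_SOFT_AFFINITY, and I'm leaving out all the stats and
tracing):

    static struct csched_vcpu *
    runq_steal_soft_with_fallback(const struct csched_pcpu *peer_pcpu,
                                  int cpu, int pri)
    {
        struct csched_vcpu *speer, *fallback = NULL;
        struct list_head *iter;

        list_for_each( iter, &peer_pcpu->runq )
        {
            struct vcpu *vc;

            speer = __runq_elem(iter);

            /* As in the patch: if there's no strictly higher priority
             * vcpu here, this runq is useless to us. */
            if ( speer->pri <= pri )
                break;

            vc = speer->vcpu;
            BUG_ON( is_idle_vcpu(vc) );

            if ( !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
            {
                /* Step 2: no useful soft affinity. Remember the first
                 * vcpu that could at least run here hard-affinity wise. */
                csched_balance_cpumask(vc, CSCHED_BALANCE_HARD_AFFINITY,
                                       cpumask_scratch);
                if ( fallback == NULL &&
                     __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
                    fallback = speer;
                continue;
            }

            /* Step 1: a soft-affine candidate; if migrateable, done. */
            csched_balance_cpumask(vc, CSCHED_BALANCE_SOFT_AFFINITY,
                                   cpumask_scratch);
            if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
                return speer;
        }

        /* Step 3: no soft-affine vcpu found; fall back to the saved
         * one, if any (NULL keeps the current behavior). */
        return fallback;
    }

The caller would then do the tracing, the stats cranking, the
__runq_remove() and the vc->processor assignment on whatever non-NULL
vcpu this returns, just as csched_runq_steal() does today.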

It's a separate patch, of course. I can try putting that together,
unless of course you want to give this a go yourself. :-)

Thanks and Regards,
Dario

Patch

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4649e64..63a8675 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1593,64 +1593,65 @@  static struct csched_vcpu *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
-    const struct vcpu * const peer_vcpu = curr_on_cpu(peer_cpu);
     struct csched_vcpu *speer;
     struct list_head *iter;
     struct vcpu *vc;
 
+    ASSERT(peer_pcpu != NULL);
+
     /*
      * Don't steal from an idle CPU's runq because it's about to
      * pick up work from it itself.
      */
-    if ( peer_pcpu != NULL && !is_idle_vcpu(peer_vcpu) )
+    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
+        goto out;
+
+    list_for_each( iter, &peer_pcpu->runq )
     {
-        list_for_each( iter, &peer_pcpu->runq )
-        {
-            speer = __runq_elem(iter);
+        speer = __runq_elem(iter);
 
-            /*
-             * If next available VCPU here is not of strictly higher
-             * priority than ours, this PCPU is useless to us.
-             */
-            if ( speer->pri <= pri )
-                break;
+        /*
+         * If next available VCPU here is not of strictly higher
+         * priority than ours, this PCPU is useless to us.
+         */
+        if ( speer->pri <= pri )
+            break;
 
-            /* Is this VCPU runnable on our PCPU? */
-            vc = speer->vcpu;
-            BUG_ON( is_idle_vcpu(vc) );
+        /* Is this VCPU runnable on our PCPU? */
+        vc = speer->vcpu;
+        BUG_ON( is_idle_vcpu(vc) );
 
-            /*
-             * If the vcpu has no useful soft affinity, skip this vcpu.
-             * In fact, what we want is to check if we have any "soft-affine
-             * work" to steal, before starting to look at "hard-affine work".
-             *
-             * Notice that, if not even one vCPU on this runq has a useful
-             * soft affinity, we could have avoid considering this runq for
-             * a soft balancing step in the first place. This, for instance,
-             * can be implemented by taking note of on what runq there are
-             * vCPUs with useful soft affinities in some sort of bitmap
-             * or counter.
-             */
-            if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
-                 && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
-                continue;
+        /*
+         * If the vcpu has no useful soft affinity, skip this vcpu.
+         * In fact, what we want is to check if we have any "soft-affine
+         * work" to steal, before starting to look at "hard-affine work".
+         *
+         * Notice that, if not even one vCPU on this runq has a useful
+         * soft affinity, we could have avoid considering this runq for
+         * a soft balancing step in the first place. This, for instance,
+         * can be implemented by taking note of on what runq there are
+         * vCPUs with useful soft affinities in some sort of bitmap
+         * or counter.
+         */
+        if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
+             && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
+            continue;
 
-            csched_balance_cpumask(vc, balance_step, cpumask_scratch);
-            if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
-            {
-                /* We got a candidate. Grab it! */
-                TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
-                         vc->domain->domain_id, vc->vcpu_id);
-                SCHED_VCPU_STAT_CRANK(speer, migrate_q);
-                SCHED_STAT_CRANK(migrate_queued);
-                WARN_ON(vc->is_urgent);
-                __runq_remove(speer);
-                vc->processor = cpu;
-                return speer;
-            }
+        csched_balance_cpumask(vc, balance_step, cpumask_scratch);
+        if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
+        {
+            /* We got a candidate. Grab it! */
+            TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
+                     vc->domain->domain_id, vc->vcpu_id);
+            SCHED_VCPU_STAT_CRANK(speer, migrate_q);
+            SCHED_STAT_CRANK(migrate_queued);
+            WARN_ON(vc->is_urgent);
+            __runq_remove(speer);
+            vc->processor = cpu;
+            return speer;
         }
     }
-
+ out:
     SCHED_STAT_CRANK(steal_peer_idle);
     return NULL;
 }