[v2,5/6] xen: credit2: optimize runq_candidate() a little bit

Message ID 150115716805.6767.12099050566492087569.stgit@Solace (mailing list archive)
State New, archived

Commit Message

Dario Faggioli July 27, 2017, 12:06 p.m. UTC
Factor all the checks of whether current is the
idle vcpu into a single one (at the top), and mark
it as unlikely().

In fact, if current is idle, all the logic for
dealing with yielding, context switch rate
limiting and soft-affinity is just pure overhead,
and we had better rush to check the runq and pick
some vcpu up.
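
The unlikely() annotation is the usual branch prediction hint; a minimal
sketch of how such a hint is typically defined and used (assuming the
common __builtin_expect-based macro, as in compiler.h; shown purely for
illustration, it is not part of this patch):

    /* Typical definition of the hint (illustrative only). */
    #define unlikely(x) __builtin_expect(!!(x), 0)

    /*
     * With the hint, the compiler lays out the idle case as the cold,
     * out-of-line branch, keeping the hot (non-idle) path as
     * straight-line code.
     */
    if ( unlikely(is_idle_vcpu(scurr->vcpu)) )
    {
        snext = scurr;
        goto check_runq;
    }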

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
---
Cc: Anshul Makkar <anshulmakkar@gmail.com>
---
Changes from v1:
- for George: about what you said in
  <d3bf41b5-a152-8290-378f-3ff279b7e3ab@citrix.com>, I went for the "leave
  unset at declaration and set explicitly on both paths" approach, i.e., the
  one you said you preferred (as I also like it better in this case). After
  doing that, I've applied your Reviewed-by, as you said I could.
---
 xen/common/sched_credit2.c |   18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

Patch

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 3f10b4b..30d9f55 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2674,11 +2674,18 @@  runq_candidate(struct csched2_runqueue_data *rqd,
     struct list_head *iter;
     struct csched2_vcpu *snext = NULL;
     struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
-    bool yield = __test_and_clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
-    bool soft_aff_preempt = false;
+    bool yield = false, soft_aff_preempt = false;
 
     *skipped = 0;
 
+    if ( unlikely(is_idle_vcpu(scurr->vcpu)) )
+    {
+        snext = scurr;
+        goto check_runq;
+    }
+
+    yield = __test_and_clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
+
     /*
      * Return the current vcpu if it has executed for less than ratelimit.
      * Adjuststment for the selected vcpu's credit and decision
@@ -2688,8 +2695,7 @@  runq_candidate(struct csched2_runqueue_data *rqd,
      * In fact, it may be the case that scurr is about to spin, and there's
      * no point forcing it to do so until rate limiting expires.
      */
-    if ( !yield && prv->ratelimit_us && !is_idle_vcpu(scurr->vcpu) &&
-         vcpu_runnable(scurr->vcpu) &&
+    if ( !yield && prv->ratelimit_us && vcpu_runnable(scurr->vcpu) &&
          (now - scurr->vcpu->runstate.state_entry_time) <
           MICROSECS(prv->ratelimit_us) )
     {
@@ -2710,8 +2716,7 @@  runq_candidate(struct csched2_runqueue_data *rqd,
     }
 
     /* If scurr has a soft-affinity, let's check whether cpu is part of it */
-    if ( !is_idle_vcpu(scurr->vcpu) &&
-         has_soft_affinity(scurr->vcpu, scurr->vcpu->cpu_hard_affinity) )
+    if ( has_soft_affinity(scurr->vcpu, scurr->vcpu->cpu_hard_affinity) )
     {
         affinity_balance_cpumask(scurr->vcpu, BALANCE_SOFT_AFFINITY,
                                  cpumask_scratch);
@@ -2750,6 +2755,7 @@  runq_candidate(struct csched2_runqueue_data *rqd,
     else
         snext = csched2_vcpu(idle_vcpu[cpu]);
 
+ check_runq:
     list_for_each( iter, &rqd->runq )
     {
         struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
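
Taken together, the hunks above give runq_candidate() roughly the
following shape (an illustrative outline only, with most of the function
elided):

    if ( unlikely(is_idle_vcpu(scurr->vcpu)) )
    {
        /* Idle: yield, rate limiting and soft-affinity are all irrelevant. */
        snext = scurr;
        goto check_runq;
    }

    yield = __test_and_clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);

    /* ... rate limiting and soft-affinity handling (non-idle only) ... */

 check_runq:
    /* ... scan rqd->runq and pick the best runnable candidate ... */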