--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -668,7 +668,7 @@ __csched_vcpu_check(struct vcpu *vc)
BUG_ON( !is_idle_vcpu(vc) );
}
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(unit_check);
}
#define CSCHED_VCPU_CHECK(_vc) (__csched_vcpu_check(_vc))
#else
@@ -692,7 +692,7 @@ __csched_vcpu_is_cache_hot(const struct csched_private *prv,
(NOW() - svc->last_sched_time) < prv->vcpu_migr_delay;
if ( hot )
- SCHED_STAT_CRANK(vcpu_hot);
+ SCHED_STAT_CRANK(unit_hot);
return hot;
}
@@ -881,7 +881,7 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_unit *svc)
if ( list_empty(&svc->active_vcpu_elem) )
{
SCHED_VCPU_STAT_CRANK(svc, state_active);
- SCHED_STAT_CRANK(acct_vcpu_active);
+ SCHED_STAT_CRANK(acct_unit_active);
sdom->active_vcpu_count++;
list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
@@ -908,7 +908,7 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
BUG_ON( list_empty(&svc->active_vcpu_elem) );
SCHED_VCPU_STAT_CRANK(svc, state_idle);
- SCHED_STAT_CRANK(acct_vcpu_idle);
+ SCHED_STAT_CRANK(acct_unit_idle);
BUG_ON( prv->weight < sdom->weight );
sdom->active_vcpu_count--;
@@ -1010,7 +1010,7 @@ csched_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit,
svc->pri = is_idle_domain(vc->domain) ?
CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
SCHED_VCPU_STATS_RESET(svc);
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return svc;
}
@@ -1038,7 +1038,7 @@ csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
unit_schedule_unlock_irq(lock, unit);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
}
static void
@@ -1058,13 +1058,13 @@ csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
struct csched_unit * const svc = CSCHED_UNIT(unit);
struct csched_dom * const sdom = svc->sdom;
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
ASSERT(!__vcpu_on_runq(svc));
if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
- SCHED_STAT_CRANK(vcpu_unpark);
+ SCHED_STAT_CRANK(unit_unpark);
vcpu_unpause(svc->vcpu);
}
@@ -1085,7 +1085,7 @@ csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
struct csched_unit * const svc = CSCHED_UNIT(unit);
unsigned int cpu = vc->processor;
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
BUG_ON( is_idle_vcpu(vc) );
@@ -1114,19 +1114,19 @@ csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
if ( unlikely(curr_on_cpu(vc->processor) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
return;
}
if ( unlikely(__vcpu_on_runq(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/*
* We temporarly boost the priority of awaking VCPUs!
@@ -1156,7 +1156,7 @@ csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
!test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
TRACE_2D(TRC_CSCHED_BOOST_START, vc->domain->domain_id, vc->vcpu_id);
- SCHED_STAT_CRANK(vcpu_boost);
+ SCHED_STAT_CRANK(unit_boost);
svc->pri = CSCHED_PRI_TS_BOOST;
}
@@ -1515,7 +1515,7 @@ csched_acct(void* dummy)
credit < -credit_cap &&
!test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
- SCHED_STAT_CRANK(vcpu_park);
+ SCHED_STAT_CRANK(unit_park);
vcpu_pause_nosync(svc->vcpu);
}
@@ -1539,7 +1539,7 @@ csched_acct(void* dummy)
* call to make sure the VCPU's priority is not boosted
* if it is woken up here.
*/
- SCHED_STAT_CRANK(vcpu_unpark);
+ SCHED_STAT_CRANK(unit_unpark);
vcpu_unpause(svc->vcpu);
clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags);
}
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2020,7 +2020,7 @@ csched2_vcpu_check(struct vcpu *vc)
{
BUG_ON( !is_idle_vcpu(vc) );
}
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(unit_check);
}
#define CSCHED2_VCPU_CHECK(_vc) (csched2_vcpu_check(_vc))
#else
@@ -2067,7 +2067,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit,
svc->budget_quota = 0;
INIT_LIST_HEAD(&svc->parked_elem);
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return svc;
}
@@ -2079,7 +2079,7 @@ csched2_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
struct csched2_unit * const svc = csched2_unit(unit);
ASSERT(!is_idle_vcpu(vc));
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
if ( curr_on_cpu(vc->processor) == unit )
{
@@ -2109,20 +2109,20 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
if ( unlikely(curr_on_cpu(cpu) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
goto out;
}
if ( unlikely(vcpu_on_runq(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
goto out;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/* If the context hasn't been saved for this vcpu yet, we can't put it on
* another runqueue. Instead, we set a flag so that it will be put on the runqueue
@@ -3138,7 +3138,7 @@ csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
sdom->nr_vcpus++;
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
CSCHED2_VCPU_CHECK(vc);
}
@@ -3161,7 +3161,7 @@ csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
ASSERT(!is_idle_vcpu(vc));
ASSERT(list_empty(&svc->runq_elem));
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
/* Remove from runqueue */
lock = unit_schedule_lock_irq(unit);
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -199,7 +199,7 @@ static void *null_alloc_vdata(const struct scheduler *ops,
INIT_LIST_HEAD(&nvc->waitq_elem);
nvc->vcpu = v;
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return nvc;
}
@@ -502,7 +502,7 @@ static void null_unit_insert(const struct scheduler *ops,
}
spin_unlock_irq(lock);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
}
static void null_unit_remove(const struct scheduler *ops,
@@ -540,7 +540,7 @@ static void null_unit_remove(const struct scheduler *ops,
out:
unit_schedule_unlock_irq(lock, unit);
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
}
static void null_unit_wake(const struct scheduler *ops,
@@ -555,21 +555,21 @@ static void null_unit_wake(const struct scheduler *ops,
if ( unlikely(curr_on_cpu(cpu) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
return;
}
if ( unlikely(!list_empty(&nvc->waitq_elem)) )
{
/* Not exactly "on runq", but close enough for reusing the counter */
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(v)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/*
* If a vcpu is neither on a pCPU nor in the waitqueue, it means it was
@@ -649,7 +649,7 @@ static void null_unit_sleep(const struct scheduler *ops,
if ( likely(!tickled && curr_on_cpu(cpu) == unit) )
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
}
static struct sched_resource *
@@ -770,7 +770,7 @@ static inline void null_vcpu_check(struct vcpu *v)
else
BUG_ON(!is_idle_vcpu(v));
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(unit_check);
}
#define NULL_VCPU_CHECK(v) (null_vcpu_check(v))
#else
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -862,7 +862,7 @@ rt_alloc_vdata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
if ( !is_idle_vcpu(vc) )
svc->budget = RTDS_DEFAULT_BUDGET;
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return svc;
}
@@ -911,7 +911,7 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
}
unit_schedule_unlock_irq(lock, unit);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
}
/*
@@ -924,7 +924,7 @@ rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
struct rt_dom * const sdom = svc->sdom;
spinlock_t *lock;
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
BUG_ON( sdom == NULL );
@@ -1146,7 +1146,7 @@ rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
struct rt_unit * const svc = rt_unit(unit);
BUG_ON( is_idle_vcpu(vc) );
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
if ( curr_on_cpu(vc->processor) == unit )
cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
@@ -1267,21 +1267,21 @@ rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
if ( unlikely(curr_on_cpu(vc->processor) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
return;
}
/* on RunQ/DepletedQ, just update info is ok */
if ( unlikely(vcpu_on_q(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/*
* If a deadline passed while svc was asleep/blocked, we need new
--- a/xen/include/xen/perfc_defn.h
+++ b/xen/include/xen/perfc_defn.h
@@ -21,20 +21,20 @@ PERFCOUNTER(sched_ctx, "sched: context switches")
PERFCOUNTER(schedule, "sched: specific scheduler")
PERFCOUNTER(dom_init, "sched: dom_init")
PERFCOUNTER(dom_destroy, "sched: dom_destroy")
-PERFCOUNTER(vcpu_alloc, "sched: vcpu_alloc")
-PERFCOUNTER(vcpu_insert, "sched: vcpu_insert")
-PERFCOUNTER(vcpu_remove, "sched: vcpu_remove")
-PERFCOUNTER(vcpu_sleep, "sched: vcpu_sleep")
PERFCOUNTER(vcpu_yield, "sched: vcpu_yield")
-PERFCOUNTER(vcpu_wake_running, "sched: vcpu_wake_running")
-PERFCOUNTER(vcpu_wake_onrunq, "sched: vcpu_wake_onrunq")
-PERFCOUNTER(vcpu_wake_runnable, "sched: vcpu_wake_runnable")
-PERFCOUNTER(vcpu_wake_not_runnable, "sched: vcpu_wake_not_runnable")
+PERFCOUNTER(unit_alloc, "sched: unit_alloc")
+PERFCOUNTER(unit_insert, "sched: unit_insert")
+PERFCOUNTER(unit_remove, "sched: unit_remove")
+PERFCOUNTER(unit_sleep, "sched: unit_sleep")
+PERFCOUNTER(unit_wake_running, "sched: unit_wake_running")
+PERFCOUNTER(unit_wake_onrunq, "sched: unit_wake_onrunq")
+PERFCOUNTER(unit_wake_runnable, "sched: unit_wake_runnable")
+PERFCOUNTER(unit_wake_not_runnable, "sched: unit_wake_not_runnable")
PERFCOUNTER(tickled_no_cpu, "sched: tickled_no_cpu")
PERFCOUNTER(tickled_idle_cpu, "sched: tickled_idle_cpu")
PERFCOUNTER(tickled_idle_cpu_excl, "sched: tickled_idle_cpu_exclusive")
PERFCOUNTER(tickled_busy_cpu, "sched: tickled_busy_cpu")
-PERFCOUNTER(vcpu_check, "sched: vcpu_check")
+PERFCOUNTER(unit_check, "sched: unit_check")
/* credit specific counters */
PERFCOUNTER(delay_ms, "csched: delay")
@@ -43,11 +43,11 @@ PERFCOUNTER(acct_no_work, "csched: acct_no_work")
PERFCOUNTER(acct_balance, "csched: acct_balance")
PERFCOUNTER(acct_reorder, "csched: acct_reorder")
PERFCOUNTER(acct_min_credit, "csched: acct_min_credit")
-PERFCOUNTER(acct_vcpu_active, "csched: acct_vcpu_active")
-PERFCOUNTER(acct_vcpu_idle, "csched: acct_vcpu_idle")
-PERFCOUNTER(vcpu_boost, "csched: vcpu_boost")
-PERFCOUNTER(vcpu_park, "csched: vcpu_park")
-PERFCOUNTER(vcpu_unpark, "csched: vcpu_unpark")
+PERFCOUNTER(acct_unit_active, "csched: acct_unit_active")
+PERFCOUNTER(acct_unit_idle, "csched: acct_unit_idle")
+PERFCOUNTER(unit_boost, "csched: unit_boost")
+PERFCOUNTER(unit_park, "csched: unit_park")
+PERFCOUNTER(unit_unpark, "csched: unit_unpark")
PERFCOUNTER(load_balance_idle, "csched: load_balance_idle")
PERFCOUNTER(load_balance_over, "csched: load_balance_over")
PERFCOUNTER(load_balance_other, "csched: load_balance_other")
@@ -57,7 +57,7 @@ PERFCOUNTER(steal_peer_idle, "csched: steal_peer_idle")
PERFCOUNTER(migrate_queued, "csched: migrate_queued")
PERFCOUNTER(migrate_running, "csched: migrate_running")
PERFCOUNTER(migrate_kicked_away, "csched: migrate_kicked_away")
-PERFCOUNTER(vcpu_hot, "csched: vcpu_hot")
+PERFCOUNTER(unit_hot, "csched: unit_hot")
/* credit2 specific counters */
PERFCOUNTER(burn_credits_t2c, "csched2: burn_credits_t2c")
Rename the scheduler related perf counters from vcpu* to unit* where
appropriate.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_credit.c    | 32 ++++++++++++++++----------------
 xen/common/sched_credit2.c   | 18 +++++++++---------
 xen/common/sched_null.c      | 18 +++++++++---------
 xen/common/sched_rt.c        | 16 ++++++++--------
 xen/include/xen/perfc_defn.h | 30 +++++++++++++++---------------
 5 files changed, 57 insertions(+), 57 deletions(-)
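
As an aside (not part of the patch): the reason the rename has to sweep every
scheduler in the same commit is that perfc_defn.h is consumed as an x-macro
list. Roughly, each PERFCOUNTER() entry generates the counter's enum
identifier together with its description string, and SCHED_STAT_CRANK()
references the counter by that same identifier, so a definition renamed to
unit_* with a leftover vcpu_* call site simply fails to build. The sketch
below illustrates the pattern with hypothetical helper names
(PERFCOUNTER_LIST, counter_id, counters[], a simplified SCHED_STAT_CRANK);
it is not the actual Xen implementation.

/*
 * Illustrative sketch only (not Xen's actual implementation): each
 * PERFCOUNTER(var, desc) entry produces both an enum identifier and a
 * description string, and SCHED_STAT_CRANK(var) must use the same token.
 */
#define PERFCOUNTER_LIST                                  \
    PERFCOUNTER(unit_alloc,  "sched: unit_alloc")         \
    PERFCOUNTER(unit_insert, "sched: unit_insert")        \
    PERFCOUNTER(unit_sleep,  "sched: unit_sleep")

/* First expansion: one enum constant per counter. */
#define PERFCOUNTER(var, desc) PERFC_ ## var,
enum counter_id { PERFCOUNTER_LIST PERFC_NR };
#undef PERFCOUNTER

/* Second expansion: the matching description table, indexed by the enum. */
#define PERFCOUNTER(var, desc) [PERFC_ ## var] = desc,
static const char *counter_desc[PERFC_NR] = { PERFCOUNTER_LIST };
#undef PERFCOUNTER

static unsigned long counters[PERFC_NR];

/* Stand-in for SCHED_STAT_CRANK()/perfc_incr(): bump a counter by name. */
#define SCHED_STAT_CRANK(var) (counters[PERFC_ ## var]++)

void wake_example(void)
{
    SCHED_STAT_CRANK(unit_sleep);   /* only compiles if the name matches */
}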