--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -684,7 +684,7 @@ __csched_vcpu_check(struct vcpu *vc)
BUG_ON( !is_idle_vcpu(vc) );
}
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(item_check);
}
#define CSCHED_VCPU_CHECK(_vc) (__csched_vcpu_check(_vc))
#else
@@ -707,7 +707,7 @@ __csched_vcpu_is_cache_hot(const struct csched_private *prv, struct vcpu *v)
(NOW() - v->sched_item->last_run_time) < prv->vcpu_migr_delay;
if ( hot )
- SCHED_STAT_CRANK(vcpu_hot);
+ SCHED_STAT_CRANK(item_hot);
return hot;
}
@@ -895,7 +895,7 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_item *svc)
if ( list_empty(&svc->active_vcpu_elem) )
{
SCHED_VCPU_STAT_CRANK(svc, state_active);
- SCHED_STAT_CRANK(acct_vcpu_active);
+ SCHED_STAT_CRANK(acct_item_active);
sdom->active_vcpu_count++;
list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
@@ -922,7 +922,7 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
BUG_ON( list_empty(&svc->active_vcpu_elem) );
SCHED_VCPU_STAT_CRANK(svc, state_idle);
- SCHED_STAT_CRANK(acct_vcpu_idle);
+ SCHED_STAT_CRANK(acct_item_idle);
BUG_ON( prv->weight < sdom->weight );
sdom->active_vcpu_count--;
@@ -1024,7 +1024,7 @@ csched_alloc_vdata(const struct scheduler *ops, struct sched_item *item,
svc->pri = is_idle_domain(vc->domain) ?
CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
SCHED_VCPU_STATS_RESET(svc);
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(item_alloc);
return svc;
}
@@ -1052,7 +1052,7 @@ csched_item_insert(const struct scheduler *ops, struct sched_item *item)
item_schedule_unlock_irq(lock, item);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(item_insert);
}
static void
@@ -1072,13 +1072,13 @@ csched_item_remove(const struct scheduler *ops, struct sched_item *item)
struct csched_item * const svc = CSCHED_ITEM(item);
struct csched_dom * const sdom = svc->sdom;
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(item_remove);
ASSERT(!__vcpu_on_runq(svc));
if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
- SCHED_STAT_CRANK(vcpu_unpark);
+ SCHED_STAT_CRANK(item_unpark);
vcpu_unpause(svc->vcpu);
}
@@ -1099,7 +1099,7 @@ csched_item_sleep(const struct scheduler *ops, struct sched_item *item)
struct csched_item * const svc = CSCHED_ITEM(item);
unsigned int cpu = vc->processor;
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(item_sleep);
BUG_ON( is_idle_vcpu(vc) );
@@ -1128,19 +1128,19 @@ csched_item_wake(const struct scheduler *ops, struct sched_item *item)
if ( unlikely(curr_on_cpu(vc->processor) == item) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(item_wake_running);
return;
}
if ( unlikely(__vcpu_on_runq(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(item_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(item_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(item_wake_not_runnable);
/*
* We temporarly boost the priority of awaking VCPUs!
@@ -1170,7 +1170,7 @@ csched_item_wake(const struct scheduler *ops, struct sched_item *item)
!test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
TRACE_2D(TRC_CSCHED_BOOST_START, vc->domain->domain_id, vc->vcpu_id);
- SCHED_STAT_CRANK(vcpu_boost);
+ SCHED_STAT_CRANK(item_boost);
svc->pri = CSCHED_PRI_TS_BOOST;
}
@@ -1529,7 +1529,7 @@ csched_acct(void* dummy)
credit < -credit_cap &&
!test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
- SCHED_STAT_CRANK(vcpu_park);
+ SCHED_STAT_CRANK(item_park);
vcpu_pause_nosync(svc->vcpu);
}
@@ -1553,7 +1553,7 @@ csched_acct(void* dummy)
* call to make sure the VCPU's priority is not boosted
* if it is woken up here.
*/
- SCHED_STAT_CRANK(vcpu_unpark);
+ SCHED_STAT_CRANK(item_unpark);
vcpu_unpause(svc->vcpu);
}
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2019,7 +2019,7 @@ csched2_vcpu_check(struct vcpu *vc)
{
BUG_ON( !is_idle_vcpu(vc) );
}
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(item_check);
}
#define CSCHED2_VCPU_CHECK(_vc) (csched2_vcpu_check(_vc))
#else
@@ -2066,7 +2066,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct sched_item *item,
svc->budget_quota = 0;
INIT_LIST_HEAD(&svc->parked_elem);
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(item_alloc);
return svc;
}
@@ -2078,7 +2078,7 @@ csched2_item_sleep(const struct scheduler *ops, struct sched_item *item)
struct csched2_item * const svc = csched2_item(item);
ASSERT(!is_idle_vcpu(vc));
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(item_sleep);
if ( curr_on_cpu(vc->processor) == item )
{
@@ -2108,20 +2108,20 @@ csched2_item_wake(const struct scheduler *ops, struct sched_item *item)
if ( unlikely(curr_on_cpu(cpu) == item) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(item_wake_running);
goto out;
}
if ( unlikely(vcpu_on_runq(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(item_wake_onrunq);
goto out;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(item_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(item_wake_not_runnable);
/* If the context hasn't been saved for this vcpu yet, we can't put it on
* another runqueue. Instead, we set a flag so that it will be put on the runqueue
@@ -3137,7 +3137,7 @@ csched2_item_insert(const struct scheduler *ops, struct sched_item *item)
sdom->nr_vcpus++;
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(item_insert);
CSCHED2_VCPU_CHECK(vc);
}
@@ -3160,7 +3160,7 @@ csched2_item_remove(const struct scheduler *ops, struct sched_item *item)
ASSERT(!is_idle_vcpu(vc));
ASSERT(list_empty(&svc->runq_elem));
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(item_remove);
/* Remove from runqueue */
lock = item_schedule_lock_irq(item);
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -207,7 +207,7 @@ static void *null_alloc_vdata(const struct scheduler *ops,
INIT_LIST_HEAD(&nvc->waitq_elem);
nvc->vcpu = v;
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(item_alloc);
return nvc;
}
@@ -473,7 +473,7 @@ static void null_item_insert(const struct scheduler *ops,
}
spin_unlock_irq(lock);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(item_insert);
}
static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
@@ -544,7 +544,7 @@ static void null_item_remove(const struct scheduler *ops,
out:
item_schedule_unlock_irq(lock, item);
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(item_remove);
}
static void null_item_wake(const struct scheduler *ops,
@@ -556,21 +556,21 @@ static void null_item_wake(const struct scheduler *ops,
if ( unlikely(curr_on_cpu(v->processor) == item) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(item_wake_running);
return;
}
if ( unlikely(!list_empty(&null_item(item)->waitq_elem)) )
{
/* Not exactly "on runq", but close enough for reusing the counter */
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(item_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(v)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(item_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(item_wake_not_runnable);
/* Note that we get here only for vCPUs assigned to a pCPU */
cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
@@ -587,7 +587,7 @@ static void null_item_sleep(const struct scheduler *ops,
if ( curr_on_cpu(v->processor) == item )
cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(item_sleep);
}
static struct sched_resource *
@@ -697,7 +697,7 @@ static inline void null_vcpu_check(struct vcpu *v)
else
BUG_ON(!is_idle_vcpu(v));
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(item_check);
}
#define NULL_VCPU_CHECK(v) (null_vcpu_check(v))
#else
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -870,7 +870,7 @@ rt_alloc_vdata(const struct scheduler *ops, struct sched_item *item, void *dd)
if ( !is_idle_vcpu(vc) )
svc->budget = RTDS_DEFAULT_BUDGET;
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(item_alloc);
return svc;
}
@@ -919,7 +919,7 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
}
item_schedule_unlock_irq(lock, item);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(item_insert);
}
/*
@@ -932,7 +932,7 @@ rt_item_remove(const struct scheduler *ops, struct sched_item *item)
struct rt_dom * const sdom = svc->sdom;
spinlock_t *lock;
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(item_remove);
BUG_ON( sdom == NULL );
@@ -1154,7 +1154,7 @@ rt_item_sleep(const struct scheduler *ops, struct sched_item *item)
struct rt_item * const svc = rt_item(item);
BUG_ON( is_idle_vcpu(vc) );
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(item_sleep);
if ( curr_on_cpu(vc->processor) == item )
cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
@@ -1275,21 +1275,21 @@ rt_item_wake(const struct scheduler *ops, struct sched_item *item)
if ( unlikely(curr_on_cpu(vc->processor) == item) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(item_wake_running);
return;
}
/* on RunQ/DepletedQ, just update info is ok */
if ( unlikely(vcpu_on_q(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(item_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(item_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(item_wake_not_runnable);
/*
* If a deadline passed while svc was asleep/blocked, we need new
--- a/xen/include/xen/perfc_defn.h
+++ b/xen/include/xen/perfc_defn.h
@@ -21,20 +21,20 @@ PERFCOUNTER(sched_ctx, "sched: context switches")
PERFCOUNTER(schedule, "sched: specific scheduler")
PERFCOUNTER(dom_init, "sched: dom_init")
PERFCOUNTER(dom_destroy, "sched: dom_destroy")
-PERFCOUNTER(vcpu_alloc, "sched: vcpu_alloc")
-PERFCOUNTER(vcpu_insert, "sched: vcpu_insert")
-PERFCOUNTER(vcpu_remove, "sched: vcpu_remove")
-PERFCOUNTER(vcpu_sleep, "sched: vcpu_sleep")
PERFCOUNTER(vcpu_yield, "sched: vcpu_yield")
-PERFCOUNTER(vcpu_wake_running, "sched: vcpu_wake_running")
-PERFCOUNTER(vcpu_wake_onrunq, "sched: vcpu_wake_onrunq")
-PERFCOUNTER(vcpu_wake_runnable, "sched: vcpu_wake_runnable")
-PERFCOUNTER(vcpu_wake_not_runnable, "sched: vcpu_wake_not_runnable")
+PERFCOUNTER(item_alloc, "sched: item_alloc")
+PERFCOUNTER(item_insert, "sched: item_insert")
+PERFCOUNTER(item_remove, "sched: item_remove")
+PERFCOUNTER(item_sleep, "sched: item_sleep")
+PERFCOUNTER(item_wake_running, "sched: item_wake_running")
+PERFCOUNTER(item_wake_onrunq, "sched: item_wake_onrunq")
+PERFCOUNTER(item_wake_runnable, "sched: item_wake_runnable")
+PERFCOUNTER(item_wake_not_runnable, "sched: item_wake_not_runnable")
PERFCOUNTER(tickled_no_cpu, "sched: tickled_no_cpu")
PERFCOUNTER(tickled_idle_cpu, "sched: tickled_idle_cpu")
PERFCOUNTER(tickled_idle_cpu_excl, "sched: tickled_idle_cpu_exclusive")
PERFCOUNTER(tickled_busy_cpu, "sched: tickled_busy_cpu")
-PERFCOUNTER(vcpu_check, "sched: vcpu_check")
+PERFCOUNTER(item_check, "sched: item_check")
/* credit specific counters */
PERFCOUNTER(delay_ms, "csched: delay")
@@ -43,11 +43,11 @@ PERFCOUNTER(acct_no_work, "csched: acct_no_work")
PERFCOUNTER(acct_balance, "csched: acct_balance")
PERFCOUNTER(acct_reorder, "csched: acct_reorder")
PERFCOUNTER(acct_min_credit, "csched: acct_min_credit")
-PERFCOUNTER(acct_vcpu_active, "csched: acct_vcpu_active")
-PERFCOUNTER(acct_vcpu_idle, "csched: acct_vcpu_idle")
-PERFCOUNTER(vcpu_boost, "csched: vcpu_boost")
-PERFCOUNTER(vcpu_park, "csched: vcpu_park")
-PERFCOUNTER(vcpu_unpark, "csched: vcpu_unpark")
+PERFCOUNTER(acct_item_active, "csched: acct_item_active")
+PERFCOUNTER(acct_item_idle, "csched: acct_item_idle")
+PERFCOUNTER(item_boost, "csched: item_boost")
+PERFCOUNTER(item_park, "csched: item_park")
+PERFCOUNTER(item_unpark, "csched: item_unpark")
PERFCOUNTER(load_balance_idle, "csched: load_balance_idle")
PERFCOUNTER(load_balance_over, "csched: load_balance_over")
PERFCOUNTER(load_balance_other, "csched: load_balance_other")
@@ -57,7 +57,7 @@ PERFCOUNTER(steal_peer_idle, "csched: steal_peer_idle")
PERFCOUNTER(migrate_queued, "csched: migrate_queued")
PERFCOUNTER(migrate_running, "csched: migrate_running")
PERFCOUNTER(migrate_kicked_away, "csched: migrate_kicked_away")
-PERFCOUNTER(vcpu_hot, "csched: vcpu_hot")
+PERFCOUNTER(item_hot, "csched: item_hot")
/* credit2 specific counters */
PERFCOUNTER(burn_credits_t2c, "csched2: burn_credits_t2c")
Rename the scheduler-related perf counters from vcpu* to item* where
appropriate.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_credit.c    | 32 ++++++++++++++++----------------
 xen/common/sched_credit2.c   | 18 +++++++++---------
 xen/common/sched_null.c      | 18 +++++++++---------
 xen/common/sched_rt.c        | 16 ++++++++--------
 xen/include/xen/perfc_defn.h | 30 +++++++++++++++---------------
 5 files changed, 57 insertions(+), 57 deletions(-)
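
A note on why the PERFCOUNTER() definitions and the SCHED_STAT_CRANK()
call sites must be renamed in lockstep: the counter name is a C
identifier shared by both sides. The definition list in perfc_defn.h is
expanded more than once to build the counter slots and their
descriptions, and SCHED_STAT_CRANK(x) ultimately bumps the slot named
by x, so a counter that is cranked but no longer defined (or vice
versa) fails to build. The sketch below is a minimal, standalone model
of that mechanism, not the actual Xen implementation: the PERFCOUNTERS
list macro, the PERFC_* enum names and the simplified perfc_incr() are
illustrative stand-ins, while the counter names themselves are the ones
introduced by this patch.

#include <stdio.h>

/* Stand-in for a few entries of xen/include/xen/perfc_defn.h. */
#define PERFCOUNTERS                                              \
    PERFCOUNTER(item_alloc,         "sched: item_alloc")          \
    PERFCOUNTER(item_insert,        "sched: item_insert")         \
    PERFCOUNTER(item_remove,        "sched: item_remove")         \
    PERFCOUNTER(item_wake_runnable, "sched: item_wake_runnable")

/* First expansion: one enum slot per counter. */
#define PERFCOUNTER(name, desc) PERFC_ ## name,
enum perfcounter { PERFCOUNTERS PERFC_NR };
#undef PERFCOUNTER

/* Second expansion: the human-readable descriptions. */
#define PERFCOUNTER(name, desc) desc,
static const char *perfc_desc[PERFC_NR] = { PERFCOUNTERS };
#undef PERFCOUNTER

static unsigned long perfc_count[PERFC_NR];

/* Simplified stand-ins for perfc_incr() and SCHED_STAT_CRANK(). */
#define perfc_incr(x)       (perfc_count[PERFC_ ## x]++)
#define SCHED_STAT_CRANK(x) perfc_incr(x)

int main(void)
{
    /* Cranking a counter only compiles if the definition uses the same name. */
    SCHED_STAT_CRANK(item_alloc);
    SCHED_STAT_CRANK(item_insert);
    SCHED_STAT_CRANK(item_insert);

    for ( int i = 0; i < PERFC_NR; i++ )
        printf("%-32s %lu\n", perfc_desc[i], perfc_count[i]);

    return 0;
}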