--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -46,8 +46,8 @@ static void do_idle(void)
{
unsigned int cpu = smp_processor_id();
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ rcu_idle_enter(cpu);
+ /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
process_pending_softirqs();
local_irq_disable();
@@ -58,7 +58,7 @@ static void do_idle(void)
}
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
}
void idle_loop(void)
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -599,7 +599,8 @@ void update_idle_stats(struct acpi_processor_power *power,
static void acpi_processor_idle(void)
{
- struct acpi_processor_power *power = processor_powers[smp_processor_id()];
+ unsigned int cpu = smp_processor_id();
+ struct acpi_processor_power *power = processor_powers[cpu];
struct acpi_processor_cx *cx = NULL;
int next_state;
uint64_t t1, t2 = 0;
@@ -648,8 +649,8 @@ static void acpi_processor_idle(void)
cpufreq_dbs_timer_suspend();
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ rcu_idle_enter(cpu);
+ /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
process_pending_softirqs();
/*
@@ -658,10 +659,10 @@ static void acpi_processor_idle(void)
*/
local_irq_disable();
- if ( !cpu_is_haltable(smp_processor_id()) )
+ if ( !cpu_is_haltable(cpu) )
{
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
return;
}
@@ -786,7 +787,7 @@ static void acpi_processor_idle(void)
/* Now in C0 */
power->last_state = &power->states[0];
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
return;
}
@@ -794,7 +795,7 @@ static void acpi_processor_idle(void)
/* Now in C0 */
power->last_state = &power->states[0];
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
if ( cpuidle_current_governor->reflect )
--- a/xen/arch/x86/cpu/mwait-idle.c
+++ b/xen/arch/x86/cpu/mwait-idle.c
@@ -755,8 +755,8 @@ static void mwait_idle(void)
cpufreq_dbs_timer_suspend();
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ rcu_idle_enter(cpu);
+ /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
process_pending_softirqs();
/* Interrupts must be disabled for C2 and higher transitions. */
@@ -764,7 +764,7 @@ static void mwait_idle(void)
if (!cpu_is_haltable(cpu)) {
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
return;
}
@@ -806,7 +806,7 @@ static void mwait_idle(void)
if (!(lapic_timer_reliable_states & (1 << cstate)))
lapic_timer_on();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
if ( cpuidle_current_governor->reflect )
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -459,7 +459,7 @@ int rcu_needs_cpu(int cpu)
 * periodically poke rcu_pending(), so that it will invoke the callback
* not too late after the end of the grace period.
*/
-void rcu_idle_timer_start()
+static void rcu_idle_timer_start(void)
{
struct rcu_data *rdp = &this_cpu(rcu_data);
@@ -475,7 +475,7 @@ void rcu_idle_timer_start()
rdp->idle_timer_active = true;
}
-void rcu_idle_timer_stop()
+static void rcu_idle_timer_stop(void)
{
struct rcu_data *rdp = &this_cpu(rcu_data);
@@ -633,10 +633,13 @@ void rcu_idle_enter(unsigned int cpu)
 * See the comment before cpumask_andnot() in rcu_start_batch().
*/
smp_mb();
+
+ rcu_idle_timer_start();
}
void rcu_idle_exit(unsigned int cpu)
{
+ rcu_idle_timer_stop();
ASSERT(cpumask_test_cpu(cpu, &rcu_ctrlblk.idle_cpumask));
cpumask_clear_cpu(cpu, &rcu_ctrlblk.idle_cpumask);
}
--- a/xen/common/sched/schedule.c
+++ b/xen/common/sched/schedule.c
@@ -3265,18 +3265,6 @@ void schedule_dump(struct cpupool *c)
rcu_read_unlock(&sched_res_rculock);
}
-void sched_tick_suspend(void)
-{
- rcu_idle_enter(smp_processor_id());
- rcu_idle_timer_start();
-}
-
-void sched_tick_resume(void)
-{
- rcu_idle_timer_stop();
- rcu_idle_exit(smp_processor_id());
-}
-
void wait(void)
{
schedule();
--- a/xen/include/xen/rcupdate.h
+++ b/xen/include/xen/rcupdate.h
@@ -148,7 +148,4 @@ int rcu_barrier(void);
void rcu_idle_enter(unsigned int cpu);
void rcu_idle_exit(unsigned int cpu);
-void rcu_idle_timer_start(void);
-void rcu_idle_timer_stop(void);
-
#endif /* __XEN_RCUPDATE_H */
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -688,8 +688,6 @@ void sched_destroy_domain(struct domain *d);
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
long sched_adjust_global(struct xen_sysctl_scheduler_op *);
int sched_id(void);
-void sched_tick_suspend(void);
-void sched_tick_resume(void);
void vcpu_wake(struct vcpu *v);
long vcpu_yield(void);
void vcpu_sleep_nosync(struct vcpu *v);
sched_tick_suspend() and sched_tick_resume() only call RCU-related
functions, so eliminate them and do the rcu_idle_timer*() calls in
rcu_idle_[enter|exit]().

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/arch/arm/domain.c         |  6 +++---
 xen/arch/x86/acpi/cpu_idle.c  | 15 ++++++++-------
 xen/arch/x86/cpu/mwait-idle.c |  8 ++++----
 xen/common/rcupdate.c         |  7 +++++--
 xen/common/sched/schedule.c   | 12 ------------
 xen/include/xen/rcupdate.h    |  3 ---
 xen/include/xen/sched.h       |  2 --
 7 files changed, 20 insertions(+), 33 deletions(-)
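
For context, a brief sketch of the resulting call pattern, pieced together
from the hunks above with unrelated code elided; it is illustrative only and
not part of the patch:

/* xen/common/rcupdate.c after the patch (unrelated code elided) */
void rcu_idle_enter(unsigned int cpu)
{
    /* ... mark this CPU idle in rcu_ctrlblk.idle_cpumask ... */
    smp_mb();

    /* Timer handling moved here from the removed sched_tick_suspend(). */
    rcu_idle_timer_start();
}

void rcu_idle_exit(unsigned int cpu)
{
    /* Timer handling moved here from the removed sched_tick_resume(). */
    rcu_idle_timer_stop();
    ASSERT(cpumask_test_cpu(cpu, &rcu_ctrlblk.idle_cpumask));
    cpumask_clear_cpu(cpu, &rcu_ctrlblk.idle_cpumask);
}

/* Typical idle-loop caller (cf. do_idle() in xen/arch/arm/domain.c) */
static void do_idle(void)
{
    unsigned int cpu = smp_processor_id();

    rcu_idle_enter(cpu);
    /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    local_irq_disable();
    /* ... enter a low power state while cpu_is_haltable(cpu) ... */
    local_irq_enable();

    rcu_idle_exit(cpu);
}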