acpi_idle: Very idle Core i7 machine never enters C3

Message ID 20100205205312.GA4532@jgarrett.org (mailing list archive)

From: Jeff Garrett
Date: Feb. 5, 2010, 8:53 p.m. UTC

Patch

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7c0441f..8c636de 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -849,73 +851,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	return idle_time;
 }
 
-/**
- * acpi_idle_enter_simple - enters an ACPI state without BM handling
- * @dev: the target CPU
- * @state: the state data
- */
-static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-				  struct cpuidle_state *state)
-{
-	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
-	ktime_t  kt1, kt2;
-	s64 idle_time;
-	s64 sleep_ticks = 0;
-
-	pr = __get_cpu_var(processors);
-
-	if (unlikely(!pr))
-		return 0;
-
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
-
-	local_irq_disable();
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (unlikely(need_resched())) {
-		current_thread_info()->status |= TS_POLLING;
-		local_irq_enable();
-		return 0;
-	}
-
-	/*
-	 * Must be done before busmaster disable as we might need to
-	 * access HPET !
-	 */
-	lapic_timer_state_broadcast(pr, cx, 1);
-
-	if (cx->type == ACPI_STATE_C3)
-		ACPI_FLUSH_CPU_CACHE();
-
-	kt1 = ktime_get_real();
-	/* Tell the scheduler that we are going deep-idle: */
-	sched_clock_idle_sleep_event();
-	acpi_idle_do_entry(cx);
-	kt2 = ktime_get_real();
-	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));
-
-	sleep_ticks = us_to_pm_timer_ticks(idle_time);
-
-	/* Tell the scheduler how much we idled: */
-	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
-
-	local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
-
-	cx->usage++;
-
-	lapic_timer_state_broadcast(pr, cx, 0);
-	cx->time += sleep_ticks;
-	return idle_time;
-}
-
 static int c3_cpu_count;
 static DEFINE_SPINLOCK(c3_lock);
 
@@ -944,7 +879,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (acpi_idle_suspend)
 		return(acpi_idle_enter_c1(dev, state));
 
-	if (acpi_idle_bm_check()) {
+	if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check && acpi_idle_bm_check()) {
 		if (dev->safe_state) {
 			dev->last_state = dev->safe_state;
 			return dev->safe_state->enter(dev, dev->safe_state);
@@ -970,17 +905,24 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		return 0;
 	}
 
-	acpi_unlazy_tlb(smp_processor_id());
+	if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
+		acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
-	sched_clock_idle_sleep_event();
+	if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
+		sched_clock_idle_sleep_event();
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
 	lapic_timer_state_broadcast(pr, cx, 1);
 
+	if (cx->type == ACPI_STATE_C3 && !pr->flags.bm_check)
+		ACPI_FLUSH_CPU_CACHE();
+
 	kt1 = ktime_get_real();
+	if (cx->type != ACPI_STATE_C3 || !pr->flags.bm_check)
+		sched_clock_idle_sleep_event();
 	/*
 	 * disable bus master
 	 * bm_check implies we need ARB_DIS
@@ -991,21 +933,19 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 * not set. In that case we cannot do much, we enter C3
 	 * without doing anything.
 	 */
-	if (pr->flags.bm_check && pr->flags.bm_control) {
+	if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 		spin_unlock(&c3_lock);
-	} else if (!pr->flags.bm_check) {
-		ACPI_FLUSH_CPU_CACHE();
 	}
 
 	acpi_idle_do_entry(cx);
 
 	/* Re-enable bus master arbitration */
-	if (pr->flags.bm_check && pr->flags.bm_control) {
+	if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
@@ -1095,7 +1035,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
-			state->enter = acpi_idle_enter_simple;
+			state->enter = acpi_idle_enter_bm;
 			dev->safe_state = state;
 			break;
 
@@ -1103,9 +1043,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			state->flags |= CPUIDLE_FLAG_DEEP;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->flags |= CPUIDLE_FLAG_CHECK_BM;
-			state->enter = pr->flags.bm_check ?
-					acpi_idle_enter_bm :
-					acpi_idle_enter_simple;
+			state->enter = acpi_idle_enter_bm;
 			break;
 		}