
[RFC,1/8] cpuidle: menu: Remove iowait influence

Message ID 20240905092645.2885200-2-christian.loehle@arm.com (mailing list archive)
State RFC, archived
Delegated to: Rafael Wysocki
Series cpufreq: cpuidle: Remove iowait behaviour

Commit Message

Christian Loehle Sept. 5, 2024, 9:26 a.m. UTC
Remove the influence of CPU iowaiters on idle state selection.
Remove the menu governor's notion of a performance multiplier, which
increased with the number of tasks that went into iowait sleep on this
CPU and have not woken up yet.

Relying on iowait for cpuidle is problematic for a few reasons:
1. There is no guarantee that an iowaiting task will wake up on the
same CPU.
2. The task being in iowait says nothing about the idle duration; we
could be selecting shallower states for a long time.
3. The task being in iowait doesn't always imply a performance hit
with increased latency.
4. If there is such a performance hit, the number of iowaiting tasks
doesn't directly correlate with it.
5. The definition of iowait altogether is vague at best; it is
sprinkled across kernel code.

Signed-off-by: Christian Loehle <christian.loehle@arm.com>
---
 drivers/cpuidle/governors/menu.c | 76 ++++----------------------------
 1 file changed, 9 insertions(+), 67 deletions(-)
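
For reference, a minimal standalone sketch (not part of the patch) of how
the exit-latency clamp in menu_select() changes. The names predicted_ns,
latency_req and nr_iowaiters follow the diff below; the plain-C types and
the two helper names are hypothetical and exist only for this illustration:

#include <stdint.h>

/*
 * Old clamp (removed): every task in iowait on this CPU added another 10
 * to the divisor, shrinking the tolerated exit latency.
 */
static uint64_t old_latency_limit(uint64_t latency_req, uint64_t predicted_ns,
				  unsigned int nr_iowaiters)
{
	uint64_t interactivity_req = predicted_ns / (1 + 10 * nr_iowaiters);

	return latency_req > interactivity_req ? interactivity_req : latency_req;
}

/*
 * New clamp: independent of iowaiters, latency_req is simply capped at the
 * predicted idle duration.
 */
static uint64_t new_latency_limit(uint64_t latency_req, uint64_t predicted_ns)
{
	return latency_req > predicted_ns ? predicted_ns : latency_req;
}

As an illustrative example, with predicted_ns = 1 ms, latency_req = 500 us
and three iowaiters, the old clamp cuts latency_req to 1000000 / 31 ns,
roughly 32 us, while the new clamp leaves it at 500 us.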

Comments

Rafael J. Wysocki Sept. 30, 2024, 2:58 p.m. UTC | #1
On Thu, Sep 5, 2024 at 11:27 AM Christian Loehle
<christian.loehle@arm.com> wrote:
>
> Remove the influence of CPU iowaiters on idle state selection.
> Remove the menu governor's notion of a performance multiplier, which
> increased with the number of tasks that went into iowait sleep on this
> CPU and have not woken up yet.
>
> Relying on iowait for cpuidle is problematic for a few reasons:
> 1. There is no guarantee that an iowaiting task will wake up on the
> same CPU.
> 2. The task being in iowait says nothing about the idle duration; we
> could be selecting shallower states for a long time.
> 3. The task being in iowait doesn't always imply a performance hit
> with increased latency.
> 4. If there is such a performance hit, the number of iowaiting tasks
> doesn't directly correlate with it.
> 5. The definition of iowait altogether is vague at best; it is
> sprinkled across kernel code.
>
> Signed-off-by: Christian Loehle <christian.loehle@arm.com>

I promised feedback on this series.

As far as this particular patch is concerned, I generally agree with
all of the above, so I'm going to put it into linux-next right away
and see if anyone reports a problem with it.

> ---
>  drivers/cpuidle/governors/menu.c | 76 ++++----------------------------
>  1 file changed, 9 insertions(+), 67 deletions(-)
>
> diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
> index f3c9d49f0f2a..28363bfa3e4c 100644
> --- a/drivers/cpuidle/governors/menu.c
> +++ b/drivers/cpuidle/governors/menu.c
> @@ -19,7 +19,7 @@
>
>  #include "gov.h"
>
> -#define BUCKETS 12
> +#define BUCKETS 6
>  #define INTERVAL_SHIFT 3
>  #define INTERVALS (1UL << INTERVAL_SHIFT)
>  #define RESOLUTION 1024
> @@ -29,12 +29,11 @@
>  /*
>   * Concepts and ideas behind the menu governor
>   *
> - * For the menu governor, there are 3 decision factors for picking a C
> + * For the menu governor, there are 2 decision factors for picking a C
>   * state:
>   * 1) Energy break even point
> - * 2) Performance impact
> - * 3) Latency tolerance (from pmqos infrastructure)
> - * These three factors are treated independently.
> + * 2) Latency tolerance (from pmqos infrastructure)
> + * These two factors are treated independently.
>   *
>   * Energy break even point
>   * -----------------------
> @@ -75,30 +74,6 @@
>   * intervals and if the stand deviation of these 8 intervals is below a
>   * threshold value, we use the average of these intervals as prediction.
>   *
> - * Limiting Performance Impact
> - * ---------------------------
> - * C states, especially those with large exit latencies, can have a real
> - * noticeable impact on workloads, which is not acceptable for most sysadmins,
> - * and in addition, less performance has a power price of its own.
> - *
> - * As a general rule of thumb, menu assumes that the following heuristic
> - * holds:
> - *     The busier the system, the less impact of C states is acceptable
> - *
> - * This rule-of-thumb is implemented using a performance-multiplier:
> - * If the exit latency times the performance multiplier is longer than
> - * the predicted duration, the C state is not considered a candidate
> - * for selection due to a too high performance impact. So the higher
> - * this multiplier is, the longer we need to be idle to pick a deep C
> - * state, and thus the less likely a busy CPU will hit such a deep
> - * C state.
> - *
> - * Currently there is only one value determining the factor:
> - * 10 points are added for each process that is waiting for IO on this CPU.
> - * (This value was experimentally determined.)
> - * Utilization is no longer a factor as it was shown that it never contributed
> - * significantly to the performance multiplier in the first place.
> - *
>   */
>
>  struct menu_device {
> @@ -112,19 +87,10 @@ struct menu_device {
>         int             interval_ptr;
>  };
>
> -static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
> +static inline int which_bucket(u64 duration_ns)
>  {
>         int bucket = 0;
>
> -       /*
> -        * We keep two groups of stats; one with no
> -        * IO pending, one without.
> -        * This allows us to calculate
> -        * E(duration)|iowait
> -        */
> -       if (nr_iowaiters)
> -               bucket = BUCKETS/2;
> -
>         if (duration_ns < 10ULL * NSEC_PER_USEC)
>                 return bucket;
>         if (duration_ns < 100ULL * NSEC_PER_USEC)
> @@ -138,19 +104,6 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
>         return bucket + 5;
>  }
>
> -/*
> - * Return a multiplier for the exit latency that is intended
> - * to take performance requirements into account.
> - * The more performance critical we estimate the system
> - * to be, the higher this multiplier, and thus the higher
> - * the barrier to go to an expensive C state.
> - */
> -static inline int performance_multiplier(unsigned int nr_iowaiters)
> -{
> -       /* for IO wait tasks (per cpu!) we add 10x each */
> -       return 1 + 10 * nr_iowaiters;
> -}
> -
>  static DEFINE_PER_CPU(struct menu_device, menu_devices);
>
>  static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
> @@ -258,8 +211,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
>         struct menu_device *data = this_cpu_ptr(&menu_devices);
>         s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
>         u64 predicted_ns;
> -       u64 interactivity_req;
> -       unsigned int nr_iowaiters;
>         ktime_t delta, delta_tick;
>         int i, idx;
>
> @@ -268,8 +219,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
>                 data->needs_update = 0;
>         }
>
> -       nr_iowaiters = nr_iowait_cpu(dev->cpu);
> -
>         /* Find the shortest expected idle interval. */
>         predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
>         if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
> @@ -283,7 +232,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
>                 }
>
>                 data->next_timer_ns = delta;
> -               data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
> +               data->bucket = which_bucket(data->next_timer_ns);
>
>                 /* Round up the result for half microseconds. */
>                 timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
> @@ -301,7 +250,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
>                  */
>                 data->next_timer_ns = KTIME_MAX;
>                 delta_tick = TICK_NSEC / 2;
> -               data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
> +               data->bucket = which_bucket(KTIME_MAX);
>         }
>
>         if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
> @@ -328,15 +277,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
>                  */
>                 if (predicted_ns < TICK_NSEC)
>                         predicted_ns = data->next_timer_ns;
> -       } else {
> -               /*
> -                * Use the performance multiplier and the user-configurable
> -                * latency_req to determine the maximum exit latency.
> -                */
> -               interactivity_req = div64_u64(predicted_ns,
> -                                             performance_multiplier(nr_iowaiters));
> -               if (latency_req > interactivity_req)
> -                       latency_req = interactivity_req;
> +       } else if (latency_req > predicted_ns) {
> +               latency_req = predicted_ns;
>         }
>
>         /*
> --
> 2.34.1
>

Patch

diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f3c9d49f0f2a..28363bfa3e4c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,7 @@ 
 
 #include "gov.h"
 
-#define BUCKETS 12
+#define BUCKETS 6
 #define INTERVAL_SHIFT 3
 #define INTERVALS (1UL << INTERVAL_SHIFT)
 #define RESOLUTION 1024
@@ -29,12 +29,11 @@ 
 /*
  * Concepts and ideas behind the menu governor
  *
- * For the menu governor, there are 3 decision factors for picking a C
+ * For the menu governor, there are 2 decision factors for picking a C
  * state:
  * 1) Energy break even point
- * 2) Performance impact
- * 3) Latency tolerance (from pmqos infrastructure)
- * These three factors are treated independently.
+ * 2) Latency tolerance (from pmqos infrastructure)
+ * These two factors are treated independently.
  *
  * Energy break even point
  * -----------------------
@@ -75,30 +74,6 @@ 
  * intervals and if the stand deviation of these 8 intervals is below a
  * threshold value, we use the average of these intervals as prediction.
  *
- * Limiting Performance Impact
- * ---------------------------
- * C states, especially those with large exit latencies, can have a real
- * noticeable impact on workloads, which is not acceptable for most sysadmins,
- * and in addition, less performance has a power price of its own.
- *
- * As a general rule of thumb, menu assumes that the following heuristic
- * holds:
- *     The busier the system, the less impact of C states is acceptable
- *
- * This rule-of-thumb is implemented using a performance-multiplier:
- * If the exit latency times the performance multiplier is longer than
- * the predicted duration, the C state is not considered a candidate
- * for selection due to a too high performance impact. So the higher
- * this multiplier is, the longer we need to be idle to pick a deep C
- * state, and thus the less likely a busy CPU will hit such a deep
- * C state.
- *
- * Currently there is only one value determining the factor:
- * 10 points are added for each process that is waiting for IO on this CPU.
- * (This value was experimentally determined.)
- * Utilization is no longer a factor as it was shown that it never contributed
- * significantly to the performance multiplier in the first place.
- *
  */
 
 struct menu_device {
@@ -112,19 +87,10 @@  struct menu_device {
 	int		interval_ptr;
 };
 
-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
+static inline int which_bucket(u64 duration_ns)
 {
 	int bucket = 0;
 
-	/*
-	 * We keep two groups of stats; one with no
-	 * IO pending, one without.
-	 * This allows us to calculate
-	 * E(duration)|iowait
-	 */
-	if (nr_iowaiters)
-		bucket = BUCKETS/2;
-
 	if (duration_ns < 10ULL * NSEC_PER_USEC)
 		return bucket;
 	if (duration_ns < 100ULL * NSEC_PER_USEC)
@@ -138,19 +104,6 @@  static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
 	return bucket + 5;
 }
 
-/*
- * Return a multiplier for the exit latency that is intended
- * to take performance requirements into account.
- * The more performance critical we estimate the system
- * to be, the higher this multiplier, and thus the higher
- * the barrier to go to an expensive C state.
- */
-static inline int performance_multiplier(unsigned int nr_iowaiters)
-{
-	/* for IO wait tasks (per cpu!) we add 10x each */
-	return 1 + 10 * nr_iowaiters;
-}
-
 static DEFINE_PER_CPU(struct menu_device, menu_devices);
 
 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
@@ -258,8 +211,6 @@  static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	struct menu_device *data = this_cpu_ptr(&menu_devices);
 	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
 	u64 predicted_ns;
-	u64 interactivity_req;
-	unsigned int nr_iowaiters;
 	ktime_t delta, delta_tick;
 	int i, idx;
 
@@ -268,8 +219,6 @@  static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		data->needs_update = 0;
 	}
 
-	nr_iowaiters = nr_iowait_cpu(dev->cpu);
-
 	/* Find the shortest expected idle interval. */
 	predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
 	if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
@@ -283,7 +232,7 @@  static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		}
 
 		data->next_timer_ns = delta;
-		data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+		data->bucket = which_bucket(data->next_timer_ns);
 
 		/* Round up the result for half microseconds. */
 		timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
@@ -301,7 +250,7 @@  static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		 */
 		data->next_timer_ns = KTIME_MAX;
 		delta_tick = TICK_NSEC / 2;
-		data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
+		data->bucket = which_bucket(KTIME_MAX);
 	}
 
 	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
@@ -328,15 +277,8 @@  static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		 */
 		if (predicted_ns < TICK_NSEC)
 			predicted_ns = data->next_timer_ns;
-	} else {
-		/*
-		 * Use the performance multiplier and the user-configurable
-		 * latency_req to determine the maximum exit latency.
-		 */
-		interactivity_req = div64_u64(predicted_ns,
-					      performance_multiplier(nr_iowaiters));
-		if (latency_req > interactivity_req)
-			latency_req = interactivity_req;
+	} else if (latency_req > predicted_ns) {
+		latency_req = predicted_ns;
 	}
 
 	/*