@@ -1074,6 +1074,7 @@ struct sched_avg {
* choices of y < 1-2^(-32)*1024.
*/
u32 runnable_avg_sum, runnable_avg_period;
+ u32 period_contrib;
u64 last_runnable_update;
s64 decay_count;
unsigned long load_avg_contrib;
@@ -2294,10 +2294,12 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
sa->last_runnable_update = now;
/* delta_w is the amount already accumulated against our next period */
- delta_w = sa->runnable_avg_period % 1024;
+ delta_w = sa->period_contrib;
if (delta + delta_w >= 1024) {
/* period roll-over */
decayed = 1;
+ /* reset now; the remainder that starts the next period is accrued below */
+ sa->period_contrib = 0;
/*
* Now that we know we're crossing a period boundary, figure
@@ -2306,7 +2308,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
*/
delta_w = 1024 - delta_w;
if (runnable)
- sa->runnable_avg_sum += delta_w;
+ sa->runnable_avg_sum += runnable * delta_w;
sa->runnable_avg_period += delta_w;
delta -= delta_w;
@@ -2323,15 +2325,17 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
/* Efficiently calculate \sum (1..n_period) 1024*y^i */
runnable_contrib = __compute_runnable_contrib(periods);
if (runnable)
- sa->runnable_avg_sum += runnable_contrib;
+ sa->runnable_avg_sum += runnable * runnable_contrib;
sa->runnable_avg_period += runnable_contrib;
}
/* Remainder of delta accrued against u_0` */
if (runnable)
- sa->runnable_avg_sum += delta;
+ sa->runnable_avg_sum += runnable * delta;
sa->runnable_avg_period += delta;
+ sa->period_contrib += delta;
+
return decayed;
}
The current amount of time already accumulated against the next period is
determined by:

	delta_w = sa->runnable_avg_period % 1024

Since runnable_avg_period is the sum of an infinite geometric series, this
method looks gross, even though the error is no more than 1024 (~1ms).
Accounting for it precisely is not hard: just use a new field,
period_contrib, to record it.

The runnable argument is currently either 1 or 0, indicating whether the
entity is runnable or not. To also account for the number of runnable
tasks, change how runnable_avg_sum is accumulated: weight each
contribution by runnable instead of merely gating on it.

Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
 include/linux/sched.h |    1 +
 kernel/sched/fair.c   |   12 ++++++++----
 2 files changed, 9 insertions(+), 4 deletions(-)
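
To see concretely why the modulo is only an approximation, here is a
minimal standalone userspace sketch (not part of the patch). The decay
factor y (y^32 = 0.5) and the 1024us period mirror the kernel's
constants; the 700us update interval is an arbitrary choice for
illustration. It tracks both the decayed period sum, as the old code
keeps it, and an exact period_contrib, as the patch keeps it, then
prints how far apart the two remainders end up:

	#include <stdio.h>
	#include <stdint.h>
	#include <math.h>

	int main(void)
	{
		double y = pow(0.5, 1.0 / 32.0); /* per-period decay, y^32 = 0.5 */
		double avg_period = 0.0;         /* decayed sum, as in the old code */
		uint32_t period_contrib = 0;     /* exact sub-period time, as in the patch */

		for (int i = 0; i < 100; i++) {
			uint32_t delta = 700;    /* an update arrives every 700us */

			/* 700 < 1024, so at most one period rolls over per update */
			if (period_contrib + delta >= 1024) {
				uint32_t delta_w = 1024 - period_contrib;

				/* close out the current period, then decay once */
				avg_period = (avg_period + delta_w) * y;
				delta -= delta_w;
				period_contrib = 0;
			}
			avg_period += delta;
			period_contrib += delta;
		}

		printf("avg_period %% 1024 = %u, period_contrib = %u\n",
		       (uint32_t)avg_period % 1024, period_contrib);
		return 0;
	}

The two remainders diverge because the decayed portion of avg_period is
not a multiple of 1024, so the modulo mixes decay residue into delta_w.
That residue is exactly the error bounded by 1024 (~1ms) noted above;
keeping period_contrib as its own field removes it entirely.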