--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2126,6 +2126,8 @@ void wake_up_new_task(struct task_struct *p)
struct rq *rq;

raw_spin_lock_irqsave(&p->pi_lock, flags);
+ /* Initialize new task's runnable average */
+ init_task_runnable_average(p);
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -2135,8 +2137,6 @@ void wake_up_new_task(struct task_struct *p)
set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif

- /* Initialize new task's runnable average */
- init_task_runnable_average(p);
rq = __task_rq_lock(p);
activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -675,11 +675,10 @@ static inline void __update_task_entity_utilization(struct sched_entity *se);
/* Give new task start runnable values to heavy its load in infant time */
void init_task_runnable_average(struct task_struct *p)
{
- u32 slice;
+ u32 start_load = sysctl_sched_latency >> 10;

- slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
- p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice;
- p->se.avg.avg_period = slice;
+ p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = start_load;
+ p->se.avg.avg_period = start_load;
__update_task_entity_contrib(&p->se);
__update_task_entity_utilization(&p->se);
}
Task load or usage is not currently considered in select_task_rq_fair(),
but if we want to use it there in the future we should make sure it is
not zero for new tasks.

The load-tracking sums are currently initialized using sched_slice(),
which doesn't work before the task has been assigned a rq. The
initialization is therefore changed to use another semi-arbitrary
value, sched_latency, instead.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 kernel/sched/core.c | 4 ++--
 kernel/sched/fair.c | 7 +++----
 2 files changed, 5 insertions(+), 6 deletions(-)
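Side note (not part of the patch): the seed value is semi-arbitrary because
the runnable sum and the averaging period are seeded with the same number,
so __update_task_entity_contrib() ends up computing roughly the task's full
weight whether the seed comes from sched_slice() or sched_latency; it only
has to be non-zero. The stand-alone user-space sketch below illustrates that
arithmetic. The 6ms default latency, the >>10 ns-to-~us conversion and the
NICE_0 weight of 1024 are assumptions used purely for illustration.

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* Assumed default: sysctl_sched_latency = 6ms, in ns */
                uint64_t sysctl_sched_latency = 6000000ULL;

                /* Same seeding as the patched init_task_runnable_average() */
                uint32_t start_load = (uint32_t)(sysctl_sched_latency >> 10);
                uint32_t runnable_avg_sum = start_load;
                uint32_t avg_period = start_load;

                /*
                 * Roughly what __update_task_entity_contrib() computes:
                 * contrib = runnable_avg_sum * weight / (avg_period + 1)
                 */
                uint32_t weight = 1024; /* assumed NICE_0 weight, scaled down */
                uint32_t contrib = (uint32_t)((uint64_t)runnable_avg_sum * weight /
                                              (avg_period + 1));

                printf("start_load=%u contrib=%u (~full weight %u)\n",
                       start_load, contrib, weight);
                return 0;
        }

Any non-zero seed yields the same near-full-weight contribution, which is why
switching the seed from sched_slice() to sched_latency is safe here;
sched_latency is simply a value that is available before the task has a rq.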