Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d4ad72f8f3c..d941c97dfbc3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
 	 * will definitely be updated (after enqueue).
 	 */
 	sa->period_contrib = 1023;
-	sa->load_avg = scale_load_down(se->load.weight);
+	/*
+	 * Tasks are initialized with full load to be seen as heavy tasks until
+	 * they get a chance to stabilize to their real load level.
+	 * Group entities are initialized with zero load to reflect the fact that
+	 * nothing has been attached to the task group yet.
+	 */
+	if (entity_is_task(se))
+		sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
 	/*
 	 * At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -5471,13 +5478,18 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
  */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 {
-	struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
-	u64 avg_idle = this_rq()->avg_idle;
-	u64 avg_cost = this_sd->avg_scan_cost;
+	struct sched_domain *this_sd;
+	u64 avg_cost, avg_idle = this_rq()->avg_idle;
 	u64 time, cost;
 	s64 delta;
 	int cpu, wrap;
 
+	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+	if (!this_sd)
+		return -1;
+
+	avg_cost = this_sd->avg_scan_cost;
+
 	/*
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particular is sensitive here.
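
The first hunk changes how PELT load is seeded at entity creation: task entities keep the pessimistic "full weight" starting value so they are treated as heavy until their real load is observed, while group entities now start at zero because nothing has been attached to the task group yet. The following is a minimal standalone C sketch of that initialization policy, not the kernel's actual code; sketch_entity, SKETCH_LOAD_AVG_MAX, and the is_task flag are illustrative stand-ins for struct sched_entity, LOAD_AVG_MAX, and entity_is_task().

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_LOAD_AVG_MAX 47742	/* plays the role of LOAD_AVG_MAX */

struct sketch_entity {
	unsigned long weight;		/* stand-in for se->load.weight */
	bool is_task;			/* stand-in for entity_is_task(se) */
	unsigned long load_avg;
	unsigned long load_sum;
};

static void sketch_init_runnable_average(struct sketch_entity *se)
{
	se->load_avg = 0;
	/* Only tasks get the pessimistic "full load" starting point. */
	if (se->is_task)
		se->load_avg = se->weight;
	/* load_sum is derived from load_avg, as in the hunk above. */
	se->load_sum = se->load_avg * SKETCH_LOAD_AVG_MAX;
}

int main(void)
{
	struct sketch_entity task  = { .weight = 1024, .is_task = true };
	struct sketch_entity group = { .weight = 1024, .is_task = false };

	sketch_init_runnable_average(&task);
	sketch_init_runnable_average(&group);

	/* The task starts heavy; the group entity starts empty. */
	printf("task load_avg=%lu, group load_avg=%lu\n",
	       task.load_avg, group.load_avg);
	return 0;
}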
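The second hunk moves the rcu_dereference() of the per-CPU sd_llc pointer after the declarations and adds a NULL check before reading avg_scan_cost through it, so select_idle_cpu() bails out with -1 (no idle CPU found) instead of dereferencing a NULL domain pointer. Below is a minimal standalone sketch of that defensive pattern, under the assumption that the cached domain pointer can transiently be NULL; lookup_llc_domain() and the domains_being_rebuilt flag are hypothetical stand-ins for rcu_dereference(*this_cpu_ptr(&sd_llc)) and the window in which no domain is attached.

#include <stddef.h>
#include <stdio.h>

struct sketch_domain {
	unsigned long avg_scan_cost;
};

static struct sketch_domain llc = { .avg_scan_cost = 100 };
static int domains_being_rebuilt;	/* simulates the NULL window */

/* Stand-in for the RCU-protected lookup: may legitimately return NULL. */
static struct sketch_domain *lookup_llc_domain(void)
{
	return domains_being_rebuilt ? NULL : &llc;
}

static int select_idle_cpu_sketch(void)
{
	struct sketch_domain *sd;

	/* Fetch the pointer, then test it before touching any member. */
	sd = lookup_llc_domain();
	if (!sd)
		return -1;	/* behave as if no idle CPU was found */

	return (int)sd->avg_scan_cost;	/* now safe to dereference */
}

int main(void)
{
	printf("with domain: %d\n", select_idle_cpu_sketch());
	domains_being_rebuilt = 1;
	printf("while rebuilding: %d\n", select_idle_cpu_sketch());
	return 0;
}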