-rw-r--r-- | kernel/sched/fair.c | 11 |
1 file changed, 7 insertions, 4 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 87db481e8a56..c0145677ee99 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10840,9 +10840,9 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 {
 	unsigned long next_balance = jiffies + HZ;
 	int this_cpu = this_rq->cpu;
+	u64 t0, t1, curr_cost = 0;
 	struct sched_domain *sd;
 	int pulled_task = 0;
-	u64 curr_cost = 0;
 
 	update_misfit_status(NULL, this_rq);
 
@@ -10887,11 +10887,13 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 
 	raw_spin_rq_unlock(this_rq);
 
+	t0 = sched_clock_cpu(this_cpu);
 	update_blocked_averages(this_cpu);
+
 	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
 		int continue_balancing = 1;
-		u64 t0, domain_cost;
+		u64 domain_cost;
 
 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
 			update_next_balance(sd, &next_balance);
@@ -10899,17 +10901,18 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 		}
 
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
-			t0 = sched_clock_cpu(this_cpu);
 
 			pulled_task = load_balance(this_cpu, this_rq, sd,
 						   CPU_NEWLY_IDLE,
 						   &continue_balancing);
 
-			domain_cost = sched_clock_cpu(this_cpu) - t0;
+			t1 = sched_clock_cpu(this_cpu);
+			domain_cost = t1 - t0;
 			if (domain_cost > sd->max_newidle_lb_cost)
 				sd->max_newidle_lb_cost = domain_cost;
 
 			curr_cost += domain_cost;
+			t0 = t1;
 		}
 
 		update_next_balance(sd, &next_balance);
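
The net effect of the change: t0 is now taken once, before update_blocked_averages(), so that call's cost is charged to the newly-idle balance pass; each domain's cost is then measured against the previous timestamp, and t0 is advanced to t1 after every balanced domain. A condensed sketch of the resulting loop, abridged from the hunks above (locking, the avg_idle early exit and the rest of newidle_balance() are omitted here):

	/* Sketch only: shape of the cost accounting after this patch. */
	t0 = sched_clock_cpu(this_cpu);
	update_blocked_averages(this_cpu);	/* now inside the measured window */

	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		int continue_balancing = 1;
		u64 domain_cost;

		if (sd->flags & SD_BALANCE_NEWIDLE) {
			pulled_task = load_balance(this_cpu, this_rq, sd,
						   CPU_NEWLY_IDLE,
						   &continue_balancing);

			t1 = sched_clock_cpu(this_cpu);
			domain_cost = t1 - t0;	/* includes work done since the previous stamp */
			if (domain_cost > sd->max_newidle_lb_cost)
				sd->max_newidle_lb_cost = domain_cost;

			curr_cost += domain_cost;
			t0 = t1;		/* next domain is measured from here */
		}
	}
	rcu_read_unlock();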