Diffstat (limited to 'kernel/sched/idle.c')
-rw-r--r--  kernel/sched/idle.c | 44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 80940939b733..8dad5aa600ea 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -238,16 +238,16 @@ static void do_idle(void)
 	tick_nohz_idle_enter();
 
 	while (!need_resched()) {
-		check_pgt_cache();
 		rmb();
 
+		local_irq_disable();
+
 		if (cpu_is_offline(cpu)) {
-			tick_nohz_idle_stop_tick_protected();
+			tick_nohz_idle_stop_tick();
 			cpuhp_report_idle_dead();
 			arch_cpu_idle_dead();
 		}
 
-		local_irq_disable();
 		arch_cpu_idle_enter();
 
 		/*
@@ -311,7 +311,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-void play_idle(unsigned long duration_ms)
+void play_idle(unsigned long duration_us)
 {
 	struct idle_timer it;
 
@@ -323,7 +323,7 @@ void play_idle(unsigned long duration_ms)
 	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
 	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
 	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
-	WARN_ON_ONCE(!duration_ms);
+	WARN_ON_ONCE(!duration_us);
 
 	rcu_sleep_check();
 	preempt_disable();
@@ -333,7 +333,8 @@ void play_idle(unsigned long duration_ms)
 	it.done = 0;
 	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	it.timer.function = idle_inject_timer_fn;
-	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
+	hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
+		      HRTIMER_MODE_REL_PINNED);
 
 	while (!READ_ONCE(it.done))
 		do_idle();
@@ -374,14 +375,27 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 	resched_curr(rq);
 }
 
-static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+}
+
+static void set_next_task_idle(struct rq *rq, struct task_struct *next)
 {
-	put_prev_task(rq, prev);
 	update_idle_core(rq);
 	schedstat_inc(rq->sched_goidle);
+}
+
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	struct task_struct *next = rq->idle;
 
-	return rq->idle;
+	if (prev)
+		put_prev_task(rq, prev);
+
+	set_next_task_idle(rq, next);
+
+	return next;
 }
 
 /*
@@ -397,10 +411,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 	raw_spin_lock_irq(&rq->lock);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -413,10 +423,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
 static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
 	BUG();
@@ -451,13 +457,13 @@ const struct sched_class idle_sched_class = {
 
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
+	.set_next_task          = set_next_task_idle,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task          = set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 	.get_rr_interval	= get_rr_interval_idle,
 