Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	71
1 file changed, 36 insertions(+), 35 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a98d54cd5535..fe365c9a08e9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7,9 +7,10 @@
  */
 #include "sched.h"
 
-#include <linux/kthread.h>
 #include <linux/nospec.h>
 
+#include <linux/kcov.h>
+
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
 
@@ -2633,6 +2634,7 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
+	kcov_prepare_switch(prev);
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	rseq_preempt(prev);
@@ -2702,6 +2704,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	finish_task(prev);
 	finish_lock_switch(rq);
 	finish_arch_post_lock_switch();
+	kcov_finish_switch(current);
 
 	fire_sched_in_preempt_notifiers(current);
 	/*
@@ -2720,28 +2723,20 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 		membarrier_mm_sync_core_before_usermode(mm);
 		mmdrop(mm);
 	}
-	if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) {
-		switch (prev_state) {
-		case TASK_DEAD:
-			if (prev->sched_class->task_dead)
-				prev->sched_class->task_dead(prev);
-
-			/*
-			 * Remove function-return probe instances associated with this
-			 * task and put them back on the free list.
-			 */
-			kprobe_flush_task(prev);
+	if (unlikely(prev_state == TASK_DEAD)) {
+		if (prev->sched_class->task_dead)
+			prev->sched_class->task_dead(prev);
 
-			/* Task is done with its stack. */
-			put_task_stack(prev);
+		/*
+		 * Remove function-return probe instances associated with this
+		 * task and put them back on the free list.
+		 */
+		kprobe_flush_task(prev);
 
-			put_task_struct(prev);
-			break;
+		/* Task is done with its stack. */
+		put_task_stack(prev);
 
-		case TASK_PARKED:
-			kthread_park_complete(prev);
-			break;
-		}
+		put_task_struct(prev);
 	}
 
 	tick_nohz_task_switch();
@@ -3109,7 +3104,9 @@ static void sched_tick_remote(struct work_struct *work)
 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
 	int cpu = twork->cpu;
 	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *curr;
 	struct rq_flags rf;
+	u64 delta;
 
 	/*
 	 * Handle the tick only if it appears the remote CPU is running in full
@@ -3118,24 +3115,28 @@ static void sched_tick_remote(struct work_struct *work)
 	 * statistics and checks timeslices in a time-independent way, regardless
 	 * of when exactly it is running.
 	 */
-	if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
-		struct task_struct *curr;
-		u64 delta;
+	if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+		goto out_requeue;
 
-		rq_lock_irq(rq, &rf);
+	rq_lock_irq(rq, &rf);
+	curr = rq->curr;
+	if (is_idle_task(curr))
+		goto out_unlock;
 
-		update_rq_clock(rq);
-		curr = rq->curr;
-		delta = rq_clock_task(rq) - curr->se.exec_start;
-		/*
-		 * Make sure the next tick runs within a reasonable
-		 * amount of time.
-		 */
-		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-		curr->sched_class->task_tick(rq, curr, 0);
-		rq_unlock_irq(rq, &rf);
-	}
+	update_rq_clock(rq);
+	delta = rq_clock_task(rq) - curr->se.exec_start;
+
+	/*
+	 * Make sure the next tick runs within a reasonable
+	 * amount of time.
+	 */
+	WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+	curr->sched_class->task_tick(rq, curr, 0);
+
+out_unlock:
+	rq_unlock_irq(rq, &rf);
 
+out_requeue:
 	/*
	 * Run the remote tick once per second (1Hz). This arbitrary
	 * frequency is large enough to avoid overload but short enough
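
As a reading aid, the two sched_tick_remote() hunks above replace the single nested if-block with early exits: the function now jumps straight to the requeue path without touching the runqueue when the remote CPU is idle or its tick is not stopped, and drops the lock again via out_unlock when rq->curr turns out to be the idle task. The sketch below is assembled only from the added and context lines of those hunks; the declarations and the trailing 1Hz requeue fall outside the hunks and are elided, so treat this as an illustration of the new control flow rather than a buildable excerpt.

	/* Remote-tick path of sched_tick_remote() after this change (sketch). */
	if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
		goto out_requeue;		/* nothing to do, go straight to the requeue */

	rq_lock_irq(rq, &rf);
	curr = rq->curr;
	if (is_idle_task(curr))
		goto out_unlock;		/* remote CPU is running its idle task, just unlock */

	update_rq_clock(rq);
	delta = rq_clock_task(rq) - curr->se.exec_start;

	/* Make sure the next tick runs within a reasonable amount of time. */
	WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
	curr->sched_class->task_tick(rq, curr, 0);

out_unlock:
	rq_unlock_irq(rq, &rf);

out_requeue:
	/* ... re-arm the 1Hz remote tick (not part of the hunks above) ... */

One consequence of the new layout is that the rq lock is only taken once the cheap idle_cpu()/tick_nohz_tick_stopped_cpu() checks have passed, and the is_idle_task() check on rq->curr happens under the lock before any clock update or task_tick() call.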