Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  26
1 file changed, 17 insertions, 9 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 455fa330de04..b8c007713b3b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -56,7 +56,6 @@
 #include <linux/profile.h>
 #include <linux/rcupdate_wait.h>
 #include <linux/security.h>
-#include <linux/stackprotector.h>
 #include <linux/stop_machine.h>
 #include <linux/suspend.h>
 #include <linux/swait.h>
@@ -346,6 +345,8 @@ struct cfs_bandwidth {
 	int			nr_periods;
 	int			nr_throttled;
 	u64			throttled_time;
+
+	bool                    distribute_running;
 #endif
 };
@@ -715,8 +716,12 @@ struct root_domain {
 	cpumask_var_t		span;
 	cpumask_var_t		online;
 
-	/* Indicate more than one runnable task for any CPU */
-	bool			overload;
+	/*
+	 * Indicate pullable load on at least one CPU, e.g:
+	 * - More than one runnable task
+	 * - Running task is misfit
+	 */
+	int			overload;
 
 	/*
 	 * The bit corresponding to a CPU gets set here if such CPU has more
@@ -843,6 +848,8 @@ struct rq {
 	unsigned char		idle_balance;
 
+	unsigned long		misfit_task_load;
+
 	/* For active balancing */
 	int			active_balance;
 	int			push_cpu;
@@ -856,8 +863,7 @@ struct rq {
 	struct sched_avg	avg_rt;
 	struct sched_avg	avg_dl;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-#define HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	struct sched_avg	avg_irq;
 #endif
 	u64			idle_stamp;
@@ -1186,6 +1192,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+extern struct static_key_false sched_asym_cpucapacity;
 
 struct sched_group_capacity {
 	atomic_t		ref;
@@ -1195,6 +1202,7 @@ struct sched_group_capacity {
 	 */
 	unsigned long		capacity;
 	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
+	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
 	unsigned long		next_update;
 	int			imbalance;		/* XXX unrelated to capacity but shared group state */
@@ -1394,7 +1402,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
 	0;
 
 #undef SCHED_FEAT
 
-#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 
 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
@@ -1694,8 +1702,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 	if (prev_nr < 2 && rq->nr_running >= 2) {
 #ifdef CONFIG_SMP
-		if (!rq->rd->overload)
-			rq->rd->overload = true;
+		if (!READ_ONCE(rq->rd->overload))
+			WRITE_ONCE(rq->rd->overload, 1);
 #endif
 	}
@@ -2215,7 +2223,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 }
 #endif
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
 	return rq->avg_irq.util_avg;
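
The new cfs_bandwidth::distribute_running flag is only declared in this header; its consumer lives in kernel/sched/fair.c, which is not part of this diff. As a rough, user-space sketch of the pattern such a flag enables (all names here are hypothetical stand-ins, not the kernel's), a boolean written under a lock keeps the distribution walk mutually exclusive even while the lock itself is dropped:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct cfs_bandwidth. */
struct bw_sketch {
	pthread_mutex_t	lock;
	bool		distribute_running;
};

static void distribute_runtime(struct bw_sketch *b)
{
	pthread_mutex_lock(&b->lock);
	if (b->distribute_running) {	/* another distributor is active */
		pthread_mutex_unlock(&b->lock);
		return;
	}
	b->distribute_running = true;
	pthread_mutex_unlock(&b->lock);

	/* ... long walk over throttled entries, lock dropped ... */

	pthread_mutex_lock(&b->lock);
	b->distribute_running = false;
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct bw_sketch b = { PTHREAD_MUTEX_INITIALIZER, false };

	distribute_runtime(&b);
	printf("done, running = %d\n", b.distribute_running);
	return 0;
}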
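The extern declaration of sched_asym_cpucapacity pairs with a DEFINE_STATIC_KEY_FALSE() definition elsewhere in the scheduler. Below is a generic sketch of the jump-label API around such a key, not the actual fair.c call sites; the helper names are invented:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

static inline bool asym_cpucapacity_active(void)
{
	/* Compiles to a patched NOP while the key is false: no memory
	 * load and no conditional branch on symmetric systems. */
	return static_branch_unlikely(&sched_asym_cpucapacity);
}

/* Flipped once, e.g. when topology setup detects CPUs with
 * differing capacities: */
static void enable_asym_cpucapacity(void)
{
	static_branch_enable(&sched_asym_cpucapacity);
}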
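The !! added to sched_feat() canonicalizes the masked bit to 0/1 instead of returning the raw bit value (e.g. 0x40), so the result can be safely compared or assigned. A minimal user-space illustration, with invented feature bits in place of the generated __SCHED_FEAT_* enum:

#include <stdio.h>

enum { FEAT_A = 0, FEAT_B = 6 };

static unsigned int features = (1u << FEAT_A) | (1u << FEAT_B);

#define feat_old(x)	(features & (1u << (x)))	/* raw bit value  */
#define feat_new(x)	!!(features & (1u << (x)))	/* normalized 0/1 */

int main(void)
{
	printf("old: %u vs %u\n", feat_old(FEAT_A), feat_old(FEAT_B)); /* 1 vs 64 */
	printf("new: %d vs %d\n", feat_new(FEAT_A), feat_new(FEAT_B)); /* 1 vs 1  */

	/* Both features are enabled, yet only the normalized form
	 * compares equal: */
	printf("old equal: %d, new equal: %d\n",
	       feat_old(FEAT_A) == feat_old(FEAT_B),
	       feat_new(FEAT_A) == feat_new(FEAT_B));
	return 0;
}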
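The add_nr_running() hunk replaces plain accesses to rq->rd->overload with READ_ONCE()/WRITE_ONCE(), matching the bool to int change above. A compilable user-space sketch of the idiom, with the kernel's accessors approximated (simplified) by volatile casts and a hypothetical stand-in for struct root_domain:

#include <stdio.h>

/* Simplified model of the kernel macros: a volatile access forces one
 * real load/store, preventing tearing, fusing or re-reading by the
 * compiler. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

struct rd_sketch {
	int overload;	/* int, not bool, after this patch */
};

static void mark_overloaded(struct rd_sketch *rd)
{
	/* Test before writing so the shared, read-mostly cacheline is
	 * only dirtied on the 0 -> 1 transition. */
	if (!READ_ONCE(rd->overload))
		WRITE_ONCE(rd->overload, 1);
}

int main(void)
{
	struct rd_sketch rd = { 0 };

	mark_overloaded(&rd);
	printf("overload = %d\n", READ_ONCE(rd.overload));
	return 0;
}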