Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	87
1 file changed, 67 insertions, 20 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c7742dcc136c..4a2e8cae63c4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -594,6 +594,7 @@ struct rt_rq {
 	unsigned long		rt_nr_total;
 	int			overloaded;
 	struct plist_head	pushable_tasks;
+
 #endif /* CONFIG_SMP */
 	int			rt_queued;
 
@@ -673,7 +674,26 @@ struct dl_rq {
 	u64			bw_ratio;
 };
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se)	(!se->my_q)
+#else
+#define entity_is_task(se)	1
+#endif
+
 #ifdef CONFIG_SMP
+/*
+ * XXX we want to get rid of these helpers and use the full load resolution.
+ */
+static inline long se_weight(struct sched_entity *se)
+{
+	return scale_load_down(se->load.weight);
+}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+	return scale_load_down(se->runnable_weight);
+}
 
 static inline bool sched_asym_prefer(int a, int b)
 {
@@ -833,8 +853,12 @@ struct rq {
 
 	struct list_head cfs_tasks;
 
-	u64			rt_avg;
-	u64			age_stamp;
+	struct sched_avg	avg_rt;
+	struct sched_avg	avg_dl;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#define HAVE_SCHED_AVG_IRQ
+	struct sched_avg	avg_irq;
+#endif
 	u64			idle_stamp;
 	u64			avg_idle;
 
@@ -1075,7 +1099,8 @@ enum numa_faults_stats {
 };
 extern void sched_setnuma(struct task_struct *p, int node);
 extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *, struct task_struct *);
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+			int cpu, int scpu);
 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
 #else
 static inline void
@@ -1690,15 +1715,9 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
-extern const_debug unsigned int sysctl_sched_time_avg;
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
-static inline u64 sched_avg_period(void)
-{
-	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
-}
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
@@ -1735,8 +1754,6 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
 #ifndef arch_scale_cpu_capacity
 static __always_inline
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1747,12 +1764,6 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 	return SCHED_CAPACITY_SCALE;
 }
 #endif
-
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
-	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
-	sched_avg_update(rq);
-}
 #else
 #ifndef arch_scale_cpu_capacity
 static __always_inline
@@ -1761,8 +1772,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
 	return SCHED_CAPACITY_SCALE;
 }
 #endif
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
-static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -2177,11 +2186,16 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
-static inline unsigned long cpu_util_dl(struct rq *rq)
+static inline unsigned long cpu_bw_dl(struct rq *rq)
 {
 	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
 }
 
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+	return READ_ONCE(rq->avg_dl.util_avg);
+}
+
 static inline unsigned long cpu_util_cfs(struct rq *rq)
 {
 	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
@@ -2193,4 +2207,37 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
 
 	return util;
 }
+
+static inline unsigned long cpu_util_rt(struct rq *rq)
+{
+	return READ_ONCE(rq->avg_rt.util_avg);
+}
+#endif
+
+#ifdef HAVE_SCHED_AVG_IRQ
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+	return rq->avg_irq.util_avg;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+	util *= (max - irq);
+	util /= max;
+
+	return util;
+
+}
+#else
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+	return 0;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+	return util;
+}
 #endif
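
Note: the helpers added at the end of this diff (cpu_util_rt(), cpu_util_dl(), cpu_util_irq() and scale_irq_capacity()) replace the old rq->rt_avg accounting with per-class PELT utilization signals. The standalone sketch below is not kernel code: the struct cpu_signals type, the cpu_total_util() helper and the sample numbers are made up for illustration. It shows one plausible way such signals combine into a single CPU utilization estimate, loosely following what the in-kernel consumers do: sum the task-clock based CFS/RT/DL contributions, scale them by the time left over after IRQ handling, then add the IRQ utilization back on top.

/*
 * Standalone, simplified sketch (not kernel code) of combining the
 * per-class utilization signals introduced by this patch into one
 * CPU utilization estimate.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL	/* same fixed-point scale the kernel uses */

/* Hypothetical stand-in for the per-rq averages added by this patch. */
struct cpu_signals {
	unsigned long util_cfs;	/* rq->cfs.avg.util_avg */
	unsigned long util_rt;	/* rq->avg_rt.util_avg */
	unsigned long util_dl;	/* rq->avg_dl.util_avg */
	unsigned long util_irq;	/* rq->avg_irq.util_avg */
};

/*
 * Mirrors scale_irq_capacity() above: shrink 'util' by the fraction of
 * time the CPU was stolen by IRQ handling.
 */
static unsigned long scale_irq_capacity(unsigned long util,
					unsigned long irq, unsigned long max)
{
	util *= (max - irq);
	util /= max;
	return util;
}

/* Hypothetical aggregation helper, for illustration only. */
static unsigned long cpu_total_util(const struct cpu_signals *s, unsigned long max)
{
	unsigned long util, irq = s->util_irq;

	if (irq >= max)			/* IRQs eat everything: CPU is saturated */
		return max;

	util = s->util_cfs + s->util_rt + s->util_dl;
	if (util >= max)
		return max;

	/* Task-clock based signals miss IRQ time: scale them, then add it back. */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	return util < max ? util : max;
}

int main(void)
{
	/* Sample numbers, chosen arbitrarily for the demonstration. */
	struct cpu_signals s = {
		.util_cfs = 300, .util_rt = 100, .util_dl = 50, .util_irq = 128,
	};

	printf("estimated utilization: %lu / %lu\n",
	       cpu_total_util(&s, SCHED_CAPACITY_SCALE), SCHED_CAPACITY_SCALE);
	return 0;
}

With the sample numbers above, the CFS+RT+DL sum of 450 is scaled by (1024 - 128)/1024 to 393, and adding the IRQ contribution of 128 gives an estimate of 521 out of 1024.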