Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	115
1 file changed, 100 insertions, 15 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7808ab050599..eeef1a3086d1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -39,9 +39,9 @@
 #include "cpuacct.h"
 
 #ifdef CONFIG_SCHED_DEBUG
-#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
+# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
 #else
-#define SCHED_WARN_ON(x)	((void)(x))
+# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
 #endif
 
 struct rq;
@@ -218,23 +218,25 @@ static inline int dl_bandwidth_enabled(void)
 	return sysctl_sched_rt_runtime >= 0;
 }
 
-extern struct dl_bw *dl_bw_of(int i);
-
 struct dl_bw {
 	raw_spinlock_t lock;
 	u64 bw, total_bw;
 };
 
+static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
+
 static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
+void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 {
 	dl_b->total_bw -= tsk_bw;
+	__dl_update(dl_b, (s32)tsk_bw / cpus);
 }
 
 static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
+void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 {
 	dl_b->total_bw += tsk_bw;
+	__dl_update(dl_b, -((s32)tsk_bw / cpus));
 }
 
 static inline
@@ -244,7 +246,22 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 }
 
+void dl_change_utilization(struct task_struct *p, u64 new_bw);
 extern void init_dl_bw(struct dl_bw *dl_b);
+extern int sched_dl_global_validate(void);
+extern void sched_dl_do_global(void);
+extern int sched_dl_overflow(struct task_struct *p, int policy,
+			     const struct sched_attr *attr);
+extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
+extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
+extern bool __checkparam_dl(const struct sched_attr *attr);
+extern void __dl_clear_params(struct task_struct *p);
+extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
+extern int dl_task_can_attach(struct task_struct *p,
+			      const struct cpumask *cs_cpus_allowed);
+extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
+					const struct cpumask *trial);
+extern bool dl_cpu_busy(unsigned int cpu);
 
 #ifdef CONFIG_CGROUP_SCHED
 
@@ -366,6 +383,11 @@ extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent
 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 		struct sched_rt_entity *rt_se, int cpu,
 		struct sched_rt_entity *parent);
+extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
+extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
+extern long sched_group_rt_runtime(struct task_group *tg);
+extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_online_group(struct task_group *tg,
@@ -558,6 +580,30 @@ struct dl_rq {
 #else
 	struct dl_bw dl_bw;
 #endif
+	/*
+	 * "Active utilization" for this runqueue: increased when a
+	 * task wakes up (becomes TASK_RUNNING) and decreased when a
+	 * task blocks
+	 */
+	u64 running_bw;
+
+	/*
+	 * Utilization of the tasks "assigned" to this runqueue (including
+	 * the tasks that are in runqueue and the tasks that executed on this
+	 * CPU and blocked). Increased when a task moves to this runqueue, and
+	 * decreased when the task moves away (migrates, changes scheduling
+	 * policy, or terminates).
+	 * This is needed to compute the "inactive utilization" for the
+	 * runqueue (inactive utilization = this_bw - running_bw).
+	 */
+	u64 this_bw;
+	u64 extra_bw;
+
+	/*
+	 * Inverse of the fraction of CPU utilization that can be reclaimed
+	 * by the GRUB algorithm.
+	 */
+	u64 bw_ratio;
 };
 
 #ifdef CONFIG_SMP
@@ -606,11 +652,9 @@ struct root_domain {
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
-extern cpumask_var_t fallback_doms;
-extern cpumask_var_t sched_domains_tmpmask;
 
 extern void init_defrootdomain(void);
-extern int init_sched_domains(const struct cpumask *cpu_map);
+extern int sched_init_domains(const struct cpumask *cpu_map);
 
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 
 #endif /* CONFIG_SMP */
@@ -1025,7 +1069,11 @@ struct sched_group_capacity {
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
 
-	unsigned long cpumask[0]; /* iteration mask */
+#ifdef CONFIG_SCHED_DEBUG
+	int id;
+#endif
+
+	unsigned long cpumask[0]; /* balance mask */
 };
 
 struct sched_group {
@@ -1046,16 +1094,15 @@ struct sched_group {
 	unsigned long cpumask[0];
 };
 
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+static inline struct cpumask *sched_group_span(struct sched_group *sg)
 {
 	return to_cpumask(sg->cpumask);
 }
 
 /*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
+ * See build_balance_mask().
  */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+static inline struct cpumask *group_balance_mask(struct sched_group *sg)
 {
 	return to_cpumask(sg->sgc->cpumask);
 }
@@ -1066,7 +1113,7 @@ static inline struct cpumask *sched_group_mask(struct sched_group *sg)
  */
 static inline unsigned int group_first_cpu(struct sched_group *group)
 {
-	return cpumask_first(sched_group_cpus(group));
+	return cpumask_first(sched_group_span(group));
 }
 
 extern int group_balance_cpu(struct sched_group *sg);
@@ -1422,7 +1469,11 @@ static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
 	curr->sched_class->set_curr_task(rq);
 }
 
+#ifdef CONFIG_SMP
 #define sched_class_highest (&stop_sched_class)
+#else
+#define sched_class_highest (&dl_sched_class)
+#endif
 
 #define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)
@@ -1467,6 +1518,8 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 }
 #endif
 
+extern void schedule_idle(void);
+
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
 extern void update_max_interval(void);
@@ -1484,7 +1537,12 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime
 extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
 
+#define BW_SHIFT	20
+#define BW_UNIT		(1 << BW_SHIFT)
+#define RATIO_SHIFT	8
 unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
@@ -1926,6 +1984,33 @@ extern void nohz_balance_exit_idle(unsigned int cpu);
 static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
+
+#ifdef CONFIG_SMP
+static inline
+void __dl_update(struct dl_bw *dl_b, s64 bw)
+{
+	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
+	int i;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+			 "sched RCU must be held");
+	for_each_cpu_and(i, rd->span, cpu_active_mask) {
+		struct rq *rq = cpu_rq(i);
+
+		rq->dl.extra_bw += bw;
+	}
+}
+#else
+static inline
+void __dl_update(struct dl_bw *dl_b, s64 bw)
+{
+	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
+
+	dl->extra_bw += bw;
+}
+#endif
+
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 struct irqtime {
 	u64			total;
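
The new BW_SHIFT/BW_UNIT constants and the to_ratio() declaration show that deadline bandwidth is kept as a 20-bit fixed-point fraction of one CPU, and the reworked __dl_add()/__dl_clear() split a task's bandwidth change over the CPUs of the root domain via __dl_update(). The following is a minimal userspace sketch of that arithmetic, not the kernel's implementation: the to_ratio() body and the example runtime/period values are assumptions made for illustration; only the constants and the tsk_bw/cpus split mirror what the diff declares.

/*
 * Illustrative userspace sketch, not kernel code: how a deadline
 * runtime/period pair maps to the BW_SHIFT fixed-point utilization
 * stored in total_bw/running_bw/this_bw/extra_bw, and what per-CPU
 * share __dl_add() hands to __dl_update(). The to_ratio() body below
 * is an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20
#define BW_UNIT		(1ULL << BW_SHIFT)	/* 1.0 CPU in fixed point */

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (period == 0)
		return 0;
	return (runtime << BW_SHIFT) / period;	/* runtime/period, scaled */
}

int main(void)
{
	uint64_t runtime = 10ULL * 1000 * 1000;		/* 10 ms, in ns */
	uint64_t period  = 100ULL * 1000 * 1000;	/* 100 ms, in ns */
	int cpus = 4;					/* CPUs in the root domain */
	uint64_t tsk_bw = to_ratio(period, runtime);

	/* ~0.1 * BW_UNIT: the amount __dl_add() adds to dl_b->total_bw */
	printf("task bw       = %llu (%.3f CPUs)\n",
	       (unsigned long long)tsk_bw, (double)tsk_bw / BW_UNIT);

	/* share subtracted from each rq's extra_bw via __dl_update() */
	printf("per-CPU share = %d\n", (int32_t)tsk_bw / cpus);
	return 0;
}

Keeping everything in BW_UNIT fixed point is what lets __dl_overflow() above compare dl_b->bw * cpus directly against total_bw without any further scaling.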
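
The comments added to struct dl_rq describe two aggregates: running_bw covers the tasks currently runnable on the runqueue, this_bw covers every task assigned to it (blocked ones included), and their difference is the "inactive utilization" that GRUB treats as reclaimable. A toy model of that bookkeeping follows; the helper names and the event sequence are invented for illustration, and only the two fields and the this_bw - running_bw relation come from the diff.

/*
 * Toy model of the running_bw/this_bw bookkeeping described in the
 * dl_rq comments above; toy_wakeup()/toy_block() are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_dl_rq {
	uint64_t running_bw;	/* tasks currently TASK_RUNNING here */
	uint64_t this_bw;	/* all tasks assigned to this runqueue */
};

/* Task wakes up here; if it just moved to this rq, it is now "assigned" too. */
static void toy_wakeup(struct toy_dl_rq *dl, uint64_t bw, int newly_assigned)
{
	dl->running_bw += bw;
	if (newly_assigned)
		dl->this_bw += bw;
}

/* Task blocks: it remains assigned, so only the active share drops. */
static void toy_block(struct toy_dl_rq *dl, uint64_t bw)
{
	dl->running_bw -= bw;
}

int main(void)
{
	struct toy_dl_rq dl = { 0, 0 };

	toy_wakeup(&dl, 104857, 1);	/* ~0.1 CPU task arrives and runs */
	toy_wakeup(&dl, 209715, 1);	/* ~0.2 CPU task arrives and runs */
	toy_block(&dl, 104857);		/* the first task blocks */

	/* inactive utilization = this_bw - running_bw */
	printf("inactive bw = %llu\n",
	       (unsigned long long)(dl.this_bw - dl.running_bw));
	return 0;
}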