Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	97
1 file changed, 73 insertions, 24 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 802b1f3405f2..0db2c1b3361e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -335,8 +335,6 @@ struct cfs_bandwidth {
 	u64			quota;
 	u64			runtime;
 	s64			hierarchical_quota;
-	u64			runtime_expires;
-	int			expires_seq;
 
 	u8			idle;
 	u8			period_active;
@@ -393,6 +391,16 @@ struct task_group {
 #endif
 
 	struct cfs_bandwidth	cfs_bandwidth;
+
+#ifdef CONFIG_UCLAMP_TASK_GROUP
+	/* The two decimal precision [%] value requested from user-space */
+	unsigned int		uclamp_pct[UCLAMP_CNT];
+	/* Clamp values requested for a task group */
+	struct uclamp_se	uclamp_req[UCLAMP_CNT];
+	/* Effective clamp values used for a task group */
+	struct uclamp_se	uclamp[UCLAMP_CNT];
+#endif
+
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -483,7 +491,8 @@ struct cfs_rq {
 	struct load_weight	load;
 	unsigned long		runnable_weight;
 	unsigned int		nr_running;
-	unsigned int		h_nr_running;
+	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
 
 	u64			exec_clock;
 	u64			min_vruntime;
@@ -556,8 +565,6 @@ struct cfs_rq {
 
 #ifdef CONFIG_CFS_BANDWIDTH
 	int			runtime_enabled;
-	int			expires_seq;
-	u64			runtime_expires;
 	s64			runtime_remaining;
 
 	u64			throttled_clock;
@@ -777,9 +784,6 @@ struct root_domain {
 	struct perf_domain __rcu *pd;
 };
 
-extern struct root_domain def_root_domain;
-extern struct mutex sched_domains_mutex;
-
 extern void init_defrootdomain(void);
 extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
@@ -907,6 +911,10 @@ struct rq {
 
 	atomic_t		nr_iowait;
 
+#ifdef CONFIG_MEMBARRIER
+	int membarrier_state;
+#endif
+
 #ifdef CONFIG_SMP
 	struct root_domain		*rd;
 	struct sched_domain __rcu	*sd;
@@ -1261,16 +1269,18 @@ enum numa_topology_type {
 extern enum numa_topology_type sched_numa_topology_type;
 extern int sched_max_numa_distance;
 extern bool find_numa_distance(int distance);
-#endif
-
-#ifdef CONFIG_NUMA
 extern void sched_init_numa(void);
 extern void sched_domains_numa_masks_set(unsigned int cpu);
 extern void sched_domains_numa_masks_clear(unsigned int cpu);
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
 #else
 static inline void sched_init_numa(void) { }
 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+	return nr_cpu_ids;
+}
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -1449,10 +1459,14 @@ static inline void unregister_sched_domain_sysctl(void)
 }
 #endif
 
+extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
+
 #else
 
 static inline void sched_ttwu_pending(void) { }
 
+static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -1700,17 +1714,21 @@ struct sched_class {
 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
 
 	/*
-	 * It is the responsibility of the pick_next_task() method that will
-	 * return the next task to call put_prev_task() on the @prev task or
-	 * something equivalent.
+	 * Both @prev and @rf are optional and may be NULL, in which case the
+	 * caller must already have invoked put_prev_task(rq, prev, rf).
+	 *
+	 * Otherwise it is the responsibility of the pick_next_task() to call
+	 * put_prev_task() on the @prev task or something equivalent, IFF it
+	 * returns a next task.
 	 *
-	 * May return RETRY_TASK when it finds a higher prio class has runnable
-	 * tasks.
+	 * In that case (@rf != NULL) it may return RETRY_TASK when it finds a
+	 * higher prio class has runnable tasks.
 	 */
 	struct task_struct * (*pick_next_task)(struct rq *rq,
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
+	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
@@ -1725,7 +1743,6 @@ struct sched_class {
 	void (*rq_offline)(struct rq *rq);
 #endif
 
-	void (*set_curr_task)(struct rq *rq);
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
 	void (*task_dead)(struct task_struct *p);
@@ -1755,12 +1772,14 @@ struct sched_class {
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	prev->sched_class->put_prev_task(rq, prev);
+	WARN_ON_ONCE(rq->curr != prev);
+	prev->sched_class->put_prev_task(rq, prev, NULL);
 }
 
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	curr->sched_class->set_curr_task(rq);
+	WARN_ON_ONCE(rq->curr != next);
+	next->sched_class->set_next_task(rq, next);
 }
 
 #ifdef CONFIG_SMP
@@ -1943,7 +1962,7 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
 
@@ -1995,7 +2014,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
@@ -2266,7 +2285,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef CONFIG_UCLAMP_TASK
-unsigned int uclamp_eff_value(struct task_struct *p, unsigned int clamp_id);
+enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
 static __always_inline
 unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
@@ -2423,3 +2442,33 @@ static inline bool sched_energy_enabled(void)
 static inline bool sched_energy_enabled(void) { return false; }
 
 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
+
+#ifdef CONFIG_MEMBARRIER
+/*
+ * The scheduler provides memory barriers required by membarrier between:
+ * - prior user-space memory accesses and store to rq->membarrier_state,
+ * - store to rq->membarrier_state and following user-space memory accesses.
+ * In the same way it provides those guarantees around store to rq->curr.
+ */
+static inline void membarrier_switch_mm(struct rq *rq,
+					struct mm_struct *prev_mm,
+					struct mm_struct *next_mm)
+{
+	int membarrier_state;
+
+	if (prev_mm == next_mm)
+		return;
+
+	membarrier_state = atomic_read(&next_mm->membarrier_state);
+	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
+		return;
+
+	WRITE_ONCE(rq->membarrier_state, membarrier_state);
+}
+#else
+static inline void membarrier_switch_mm(struct rq *rq,
+					struct mm_struct *prev_mm,
+					struct mm_struct *next_mm)
+{
+}
+#endif
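Note on the task_group hunk: the new fields keep both the raw two-decimal percentage written by user-space (uclamp_pct[]) and the clamp values derived from it (uclamp_req[], uclamp[]). As a hedged sketch of the arithmetic implied by the "two decimal precision [%]" comment: a cgroup write of "25.50" would be stored as 2550 and scaled against SCHED_CAPACITY_SCALE. The helper name and the 10000 scale constant below are illustrative, not taken from this patch.

static inline unsigned int uclamp_pct_to_value_sketch(unsigned int pct)
{
	/* pct in 1/100-percent units: 0..10000 maps to 0..SCHED_CAPACITY_SCALE */
	return DIV_ROUND_CLOSEST_ULL((u64)SCHED_CAPACITY_SCALE * pct, 10000);
}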
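Note on the NUMA hunk: sched_numa_find_closest() gains a !CONFIG_NUMA stub that returns nr_cpu_ids, the usual kernel convention for "no CPU found". A hedged usage sketch, with an illustrative caller name, showing how that sentinel is expected to be checked:

static int pick_nearby_cpu_sketch(const struct cpumask *cpus, int cpu)
{
	int closest = sched_numa_find_closest(cpus, cpu);

	/* nr_cpu_ids means no suitable CPU; fall back to any allowed CPU */
	if (closest >= nr_cpu_ids)
		closest = cpumask_any(cpus);

	return closest;
}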
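Note on the sched_class hunks: set_curr_task() is replaced by set_next_task(), put_prev_task() gains an rq_flags argument, and the NULL-@rf wrappers above assert that both are only applied to rq->curr. A minimal sketch of how core code is expected to pair them when changing the running task's scheduling parameters, modelled on the existing dequeue/requeue pattern in __sched_setscheduler(); the function name and flag choice are illustrative.

static void change_curr_params_sketch(struct rq *rq, struct task_struct *p)
{
	bool queued  = task_on_rq_queued(p);
	bool running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);	/* old class stops tracking p as curr */

	/* ... update p->sched_class / priority here ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);	/* new class resumes p as rq->curr */
}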
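Note on the final hunk: it only introduces membarrier_switch_mm(); the caller belongs to the context-switch path and is not part of this file. Below is a minimal sketch, assuming a simplified context_switch() that handles only the user-thread case, of where the helper is meant to sit: the next mm's membarrier state is snapshotted into the runqueue before the mm switch, so a concurrent sys_membarrier() reading rq->membarrier_state sees a value consistent with rq->curr. The function name and structure are illustrative, not taken from this diff.

static void context_switch_sketch(struct rq *rq, struct task_struct *prev,
				  struct task_struct *next)
{
	/* Only the user-thread case; kernel threads borrow prev->active_mm. */
	if (next->mm) {
		membarrier_switch_mm(rq, prev->active_mm, next->mm);
		/*
		 * The mm switch and the later store to rq->curr are ordered
		 * against user-space accesses by the scheduler, as the
		 * comment above membarrier_switch_mm() describes.
		 */
		switch_mm_irqs_off(prev->active_mm, next->mm, next);
	}
}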