Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	92
1 files changed, 85 insertions, 7 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a4a20046e586..771f8ddb7053 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1041,6 +1041,7 @@ struct rq {
 
 	unsigned long		cpu_capacity;
 	unsigned long		cpu_capacity_orig;
+	unsigned long		cpu_capacity_inverted;
 
 	struct balance_callback *balance_callback;
 
@@ -1150,6 +1151,9 @@ struct rq {
 	unsigned int		core_forceidle_occupation;
 	u64			core_forceidle_start;
 #endif
+
+	/* Scratch cpumask to be temporarily used under rq_lock */
+	cpumask_var_t		scratch_mask;
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1877,6 +1881,13 @@ static inline void dirty_sched_domain_sysctl(int cpu)
 #endif
 
 extern int sched_update_scaling(void);
+
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+	if (!p->user_cpus_ptr)
+		return cpu_possible_mask; /* &init_task.cpus_mask */
+	return p->user_cpus_ptr;
+}
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -2144,6 +2155,12 @@ extern const u32		sched_prio_to_wmult[40];
 
 #define RETRY_TASK		((void *)-1UL)
 
+struct affinity_context {
+	const struct cpumask *new_mask;
+	struct cpumask *user_mask;
+	unsigned int flags;
+};
+
 struct sched_class {
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -2172,9 +2189,7 @@ struct sched_class {
 
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask,
-				 u32 flags);
+	void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -2285,7 +2300,7 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
 
 static inline struct task_struct *get_push_task(struct rq *rq)
 {
@@ -2878,6 +2893,24 @@ static inline unsigned long capacity_orig_of(int cpu)
 	return cpu_rq(cpu)->cpu_capacity_orig;
 }
 
+/*
+ * Returns inverted capacity if the CPU is in capacity inversion state.
+ * 0 otherwise.
+ *
+ * Capacity inversion detection only considers thermal impact where actual
+ * performance points (OPPs) gets dropped.
+ *
+ * Capacity inversion state happens when another performance domain that has
+ * equal or lower capacity_orig_of() becomes effectively larger than the perf
+ * domain this CPU belongs to due to thermal pressure throttling it hard.
+ *
+ * See comment in update_cpu_capacity().
+ */
+static inline unsigned long cpu_in_capacity_inversion(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity_inverted;
+}
+
 /**
  * enum cpu_util_type - CPU utilization type
  * @FREQUENCY_UTIL:	Utilization used to select frequency
@@ -2979,6 +3012,23 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 #ifdef CONFIG_UCLAMP_TASK
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+					  enum uclamp_id clamp_id)
+{
+	return READ_ONCE(rq->uclamp[clamp_id].value);
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+				 unsigned int value)
+{
+	WRITE_ONCE(rq->uclamp[clamp_id].value, value);
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+	return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
+}
+
 /**
  * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
  * @rq:		The rq to clamp against. Must not be NULL.
@@ -3014,12 +3064,12 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 		 * Ignore last runnable task's max clamp, as this task will
 		 * reset it. Similarly, no need to read the rq's min clamp.
 		 */
-		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+		if (uclamp_rq_is_idle(rq))
 			goto out;
 	}
 
-	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
-	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+	min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
+	max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
 out:
 	/*
 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
@@ -3060,6 +3110,15 @@ static inline bool uclamp_is_used(void)
 	return static_branch_likely(&sched_uclamp_used);
 }
 #else /* CONFIG_UCLAMP_TASK */
+static inline unsigned long uclamp_eff_value(struct task_struct *p,
+					     enum uclamp_id clamp_id)
+{
+	if (clamp_id == UCLAMP_MIN)
+		return 0;
+
+	return SCHED_CAPACITY_SCALE;
+}
+
 static inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 				  struct task_struct *p)
@@ -3073,6 +3132,25 @@ static inline bool uclamp_is_used(void)
 {
 	return false;
 }
+
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+					  enum uclamp_id clamp_id)
+{
+	if (clamp_id == UCLAMP_MIN)
+		return 0;
+
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+				 unsigned int value)
+{
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+	return false;
+}
 #endif /* CONFIG_UCLAMP_TASK */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
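
The sched_class::set_cpus_allowed() hook and set_cpus_allowed_common() now take a single struct affinity_context instead of the old (new_mask, flags) pair, so the user-requested mask can travel together with the flags. A minimal caller-side sketch of the conversion follows; example_set_affinity() is an illustrative name and the zero flags value stands in for whatever SCA_* flags a real caller would pass:

/*
 * Illustrative sketch only: bundle the arguments that used to be passed
 * separately into one struct affinity_context and hand it to the class hook.
 */
static int example_set_affinity(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct affinity_context ac = {
		.new_mask  = new_mask,
		.user_mask = NULL,	/* no user-requested mask recorded here */
		.flags     = 0,		/* a real caller would pass SCA_* flags */
	};

	/* Old form: p->sched_class->set_cpus_allowed(p, new_mask, flags); */
	p->sched_class->set_cpus_allowed(p, &ac);
	return 0;
}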
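The uclamp_rq_get()/uclamp_rq_set()/uclamp_rq_is_idle() helpers wrap the READ_ONCE()/WRITE_ONCE() accesses to rq->uclamp[], and the new !CONFIG_UCLAMP_TASK stubs return the no-clamp defaults (0 for UCLAMP_MIN, SCHED_CAPACITY_SCALE for UCLAMP_MAX). That lets callers read the rq clamps without an #ifdef; a hedged sketch, with example_rq_clamped_util() being a made-up helper name:

/*
 * Illustrative sketch only: clamp a raw utilization value with the rq's
 * aggregated uclamp values.  With CONFIG_UCLAMP_TASK=n the stubs turn this
 * into clamp(util, 0, SCHED_CAPACITY_SCALE), so no #ifdef is needed.
 */
static inline unsigned long example_rq_clamped_util(struct rq *rq,
						    unsigned long util)
{
	unsigned long min_util, max_util;

	/* An idle rq still carries the last runnable task's clamps; skip them. */
	if (uclamp_rq_is_idle(rq))
		return util;

	min_util = uclamp_rq_get(rq, UCLAMP_MIN);
	max_util = uclamp_rq_get(rq, UCLAMP_MAX);

	return clamp(util, min_util, max_util);
}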
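cpu_in_capacity_inversion() reports the thermally reduced capacity when this CPU's performance domain has been throttled below another domain of equal or lower capacity_orig_of(), and 0 otherwise. A sketch of how a capacity-fit check might consult it; example_task_fits_cpu() is illustrative, and fits_capacity() is the headroom macro from the fair-class code rather than part of this header:

/*
 * Illustrative sketch only: compare a task's utilization against the
 * capacity the CPU can actually deliver right now.  Under capacity
 * inversion the inverted (thermally capped) value is the honest bound;
 * otherwise fall back to the original capacity.
 */
static inline bool example_task_fits_cpu(unsigned long task_util, int cpu)
{
	unsigned long capacity = cpu_in_capacity_inversion(cpu);

	if (!capacity)
		capacity = capacity_orig_of(cpu);

	return fits_capacity(task_util, capacity);	/* ~20% headroom margin */
}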