Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	51
1 file changed, 23 insertions(+), 28 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2f90fa92468..f073bd59df32 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,6 +21,7 @@
 #include <linux/seccomp.h>
 #include <linux/nodemask.h>
 #include <linux/rcupdate.h>
+#include <linux/refcount.h>
 #include <linux/resource.h>
 #include <linux/latencytop.h>
 #include <linux/sched/prio.h>
@@ -47,6 +48,7 @@ struct pid_namespace;
 struct pipe_inode_info;
 struct rcu_node;
 struct reclaim_state;
+struct capture_control;
 struct robust_list_head;
 struct sched_attr;
 struct sched_param;
@@ -356,12 +358,6 @@ struct util_est {
  * For cfs_rq, it is the aggregated load_avg of all runnable and
  * blocked sched_entities.
  *
- * load_avg may also take frequency scaling into account:
- *
- *   load_avg = runnable% * scale_load_down(load) * freq%
- *
- * where freq% is the CPU frequency normalized to the highest frequency.
- *
  * [util_avg definition]
  *
  *   util_avg = running% * SCHED_CAPACITY_SCALE
@@ -370,17 +366,14 @@ struct util_est {
  * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
  * and blocked sched_entities.
  *
- * util_avg may also factor frequency scaling and CPU capacity scaling:
- *
- *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
- *
- * where freq% is the same as above, and capacity% is the CPU capacity
- * normalized to the greatest capacity (due to uarch differences, etc).
+ * load_avg and util_avg don't directly factor frequency scaling and CPU
+ * capacity scaling. The scaling is done through the rq_clock_pelt that
+ * is used for computing those signals (see update_rq_clock_pelt()).
 *
- * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
- * themselves are in the range of [0, 1]. To do fixed point arithmetics,
- * we therefore scale them to as large a range as necessary. This is for
- * example reflected by util_avg's SCHED_CAPACITY_SCALE.
+ * N.B., the above ratios (runnable% and running%) themselves are in the
+ * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
+ * to as large a range as necessary. This is for example reflected by
+ * util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
@@ -607,7 +600,7 @@ struct task_struct {
 	randomized_struct_fields_start
 
 	void				*stack;
-	atomic_t			usage;
+	refcount_t			usage;
 	/* Per task flags (PF_*), defined further below: */
 	unsigned int			flags;
 	unsigned int			ptrace;
@@ -739,12 +732,6 @@ struct task_struct {
 	unsigned			use_memdelay:1;
 #endif
 
-	/*
-	 * May usercopy functions fault on kernel addresses?
-	 * This is not just a single bit because this can potentially nest.
-	 */
-	unsigned int			kernel_uaccess_faults_ok;
-
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
 	struct restart_block		restart_block;
@@ -964,6 +951,9 @@ struct task_struct {
 	struct io_context		*io_context;
 
+#ifdef CONFIG_COMPACTION
+	struct capture_control		*capture_control;
+#endif
 	/* Ptrace state: */
 	unsigned long			ptrace_message;
 	kernel_siginfo_t		*last_siginfo;
@@ -995,7 +985,7 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
 	struct list_head		cg_list;
 #endif
-#ifdef CONFIG_X86_RESCTRL
+#ifdef CONFIG_X86_CPU_RESCTRL
 	u32				closid;
 	u32				rmid;
 #endif
@@ -1193,7 +1183,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/* A live task holds one reference: */
-	atomic_t			stack_refcount;
+	refcount_t			stack_refcount;
 #endif
 #ifdef CONFIG_LIVEPATCH
 	int patch_state;
@@ -1409,7 +1399,7 @@ extern struct pid *cad_pid;
 #define PF_UMH			0x02000000	/* I'm an Usermodehelper process */
 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
-#define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
+#define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation requests will have __GFP_MOVABLE cleared */
 #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
@@ -1459,6 +1449,7 @@ static inline bool is_percpu_thread(void)
 #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
 #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
 #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
+#define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */
 
 #define TASK_PFA_TEST(name, func)					\
 	static inline bool task_##func(struct task_struct *p)		\
@@ -1487,6 +1478,10 @@ TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
 
+TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+
 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
@@ -1754,9 +1749,9 @@ static __always_inline bool need_resched(void)
 
 static inline unsigned int task_cpu(const struct task_struct *p)
 {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-	return p->cpu;
+	return READ_ONCE(p->cpu);
 #else
-	return task_thread_info(p)->cpu;
+	return READ_ONCE(task_thread_info(p)->cpu);
 #endif
 }
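Two of the recurring changes above are mechanical but worth spelling out: reference counters on task_struct (usage, stack_refcount) move from atomic_t to refcount_t, and the lockless reads in task_cpu() gain READ_ONCE(). The following is a minimal sketch of both patterns on a made-up structure; my_obj, my_obj_create(), my_obj_get(), my_obj_put(), and my_obj_cpu() are illustrative names, not kernel APIs, while refcount_set(), refcount_inc(), refcount_dec_and_test(), and READ_ONCE() are the real interfaces from <linux/refcount.h> and <linux/compiler.h>:

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/compiler.h>	/* READ_ONCE() */

/* Hypothetical object mirroring the task_struct pattern in this diff. */
struct my_obj {
	refcount_t	usage;	/* was atomic_t before the conversion */
	unsigned int	cpu;	/* written by one CPU, read locklessly */
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* refcount_set(), not atomic_set(): object starts with one reference */
	refcount_set(&obj->usage, 1);
	return obj;
}

static inline void my_obj_get(struct my_obj *obj)
{
	/* Saturates and WARNs on overflow instead of silently wrapping */
	refcount_inc(&obj->usage);
}

static inline void my_obj_put(struct my_obj *obj)
{
	/* Returns true only for the final reference, so we free exactly once */
	if (refcount_dec_and_test(&obj->usage))
		kfree(obj);
}

static inline unsigned int my_obj_cpu(const struct my_obj *obj)
{
	/*
	 * READ_ONCE() keeps the compiler from tearing or re-loading the
	 * value, matching the task_cpu() change in this diff.
	 */
	return READ_ONCE(obj->cpu);
}

The design rationale behind the refcount_t conversion is that refcount_inc() saturates and warns on overflow instead of wrapping back through zero, so a reference-count leak can no longer be escalated into a use-after-free. READ_ONCE() in task_cpu() serves a related defensive purpose: it forces a single, untorn load of a field that a remote CPU may be rewriting concurrently.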