Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h   149
1 file changed, 77 insertions(+), 72 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25f54c79f757..306f4f0c987a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -137,12 +137,6 @@ struct filename;
 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
 
 /*
- * List of flags we want to share for kernel threads,
- * if only because they are not used by them anyway.
- */
-#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
-
-/*
  * These are the constant used to fake the fixed-point load-average
  * counting. Some notes:
  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
@@ -220,7 +214,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define TASK_PARKED		512
 #define TASK_STATE_MAX		1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -745,7 +739,6 @@ static inline int signal_group_exit(const struct signal_struct *sig)
 struct user_struct {
 	atomic_t __count;	/* reference count */
 	atomic_t processes;	/* How many processes does this user have? */
-	atomic_t files;		/* How many open files does this user have? */
 	atomic_t sigpending;	/* How many pending signals does this user have? */
 #ifdef CONFIG_INOTIFY_USER
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
@@ -854,10 +847,10 @@ enum cpu_idle_type {
 };
 
 /*
- * Increase resolution of cpu_power calculations
+ * Increase resolution of cpu_capacity calculations
  */
-#define SCHED_POWER_SHIFT	10
-#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
+#define SCHED_CAPACITY_SHIFT	10
+#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
 
 /*
  * sched-domains (multiprocessor balancing) declarations:
@@ -869,7 +862,8 @@ enum cpu_idle_type {
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
+#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
+#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
@@ -877,7 +871,26 @@ enum cpu_idle_type {
 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 #define SD_NUMA			0x4000	/* cross-node balancing */
 
-extern int __weak arch_sd_sibiling_asym_packing(void);
+#ifdef CONFIG_SCHED_SMT
+static inline const int cpu_smt_flags(void)
+{
+	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+static inline const int cpu_core_flags(void)
+{
+	return SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+static inline const int cpu_numa_flags(void)
+{
+	return SD_NUMA;
+}
+#endif
 
 struct sched_domain_attr {
 	int relax_domain_level;
@@ -985,6 +998,38 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
+typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef const int (*sched_domain_flags_f)(void);
+
+#define SDTL_OVERLAP	0x01
+
+struct sd_data {
+	struct sched_domain **__percpu sd;
+	struct sched_group **__percpu sg;
+	struct sched_group_capacity **__percpu sgc;
+};
+
+struct sched_domain_topology_level {
+	sched_domain_mask_f mask;
+	sched_domain_flags_f sd_flags;
+	int		    flags;
+	int		    numa_level;
+	struct sd_data      data;
+#ifdef CONFIG_SCHED_DEBUG
+	char                *name;
+#endif
+};
+
+extern struct sched_domain_topology_level *sched_domain_topology;
+
+extern void set_sched_topology(struct sched_domain_topology_level *tl);
+
+#ifdef CONFIG_SCHED_DEBUG
+# define SD_INIT_NAME(type)		.name = #type
+#else
+# define SD_INIT_NAME(type)
+#endif
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1123,8 +1168,8 @@ struct sched_dl_entity {
 
 	/*
 	 * Original scheduling parameters. Copied here from sched_attr
-	 * during sched_setscheduler2(), they will remain the same until
-	 * the next sched_setscheduler2().
+	 * during sched_setattr(), they will remain the same until
+	 * the next sched_setattr().
 	 */
 	u64 dl_runtime;		/* maximum runtime for each instance	*/
 	u64 dl_deadline;	/* relative deadline of each instance	*/
@@ -1153,9 +1198,12 @@ struct sched_dl_entity {
 	 *
 	 * @dl_boosted tells if we are boosted due to DI. If so we are
 	 * outside bandwidth enforcement mechanism (but only until we
-	 * exit the critical section).
+	 * exit the critical section);
+	 *
+	 * @dl_yielded tells if task gave up the cpu before consuming
+	 * all its available runtime during the last job.
 	 */
-	int dl_throttled, dl_new, dl_boosted;
+	int dl_throttled, dl_new, dl_boosted, dl_yielded;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its
@@ -2125,7 +2173,7 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-extern bool yield_to(struct task_struct *p, bool preempt);
+extern int yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
 /**
@@ -2366,9 +2414,6 @@ extern void flush_itimer_signals(void);
 
 extern void do_group_exit(int);
 
-extern int allow_signal(int);
-extern int disallow_signal(int);
-
 extern int do_execve(struct filename *,
 		     const char __user * const __user *,
 		     const char __user * const __user *);
@@ -2376,7 +2421,11 @@ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, i
 struct task_struct *fork_idle(int);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
-extern void set_task_comm(struct task_struct *tsk, const char *from);
+extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
+static inline void set_task_comm(struct task_struct *tsk, const char *from)
+{
+	__set_task_comm(tsk, from, false);
+}
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
@@ -2720,51 +2769,9 @@ static inline int spin_needbreak(spinlock_t *lock)
 
 /*
  * Idle thread specific functions to determine the need_resched
- * polling state. We have two versions, one based on TS_POLLING in
- * thread_info.status and one based on TIF_POLLING_NRFLAG in
- * thread_info.flags
+ * polling state.
  */
-#ifdef TS_POLLING
-static inline int tsk_is_polling(struct task_struct *p)
-{
-	return task_thread_info(p)->status & TS_POLLING;
-}
-static inline void __current_set_polling(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-}
-
-static inline bool __must_check current_set_polling_and_test(void)
-{
-	__current_set_polling();
-
-	/*
-	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
-	 */
-	smp_mb();
-
-	return unlikely(tif_need_resched());
-}
-
-static inline void __current_clr_polling(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
-{
-	__current_clr_polling();
-
-	/*
-	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
-	 */
-	smp_mb();
-
-	return unlikely(tif_need_resched());
-}
-#elif defined(TIF_POLLING_NRFLAG)
+#ifdef TIF_POLLING_NRFLAG
 static inline int tsk_is_polling(struct task_struct *p)
 {
 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
@@ -2782,10 +2789,8 @@ static inline bool __must_check current_set_polling_and_test(void)
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
-	 *
-	 * XXX: assumes set/clear bit are identical barrier wise.
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return unlikely(tif_need_resched());
 }
@@ -2803,7 +2808,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 	 * Polling state must be visible before we test NEED_RESCHED,
 	 * paired by resched_task()
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return unlikely(tif_need_resched());
 }
@@ -2956,7 +2961,7 @@ static inline void inc_syscw(struct task_struct *tsk)
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
-#ifdef CONFIG_MM_OWNER
+#ifdef CONFIG_MEMCG
 extern void mm_update_next_owner(struct mm_struct *mm);
 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
 #else
@@ -2967,7 +2972,7 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 {
 }
-#endif /* CONFIG_MM_OWNER */
+#endif /* CONFIG_MEMCG */
 
 static inline unsigned long task_rlimit(const struct task_struct *tsk,
 		unsigned int limit)
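Illustration (not part of the diff above): the @@ -985,6 +998,38 @@ hunk exports struct sched_domain_topology_level and set_sched_topology(), which let an architecture replace the scheduler's built-in domain hierarchy with its own table, while cpu_smt_flags()/cpu_core_flags() supply the per-level SD_* flags. The sketch below shows roughly how such a table could be wired up; it assumes the cpu_smt_mask(), cpu_coregroup_mask() and cpu_cpu_mask() helpers from linux/topology.h and asm/topology.h, and the my_arch_* names are hypothetical.

/*
 * Hypothetical sketch of an architecture-specific topology table using the
 * hooks introduced by this diff.  Each level names the CPUs spanned at that
 * level (mask) and the SD_* flags the scheduler should apply to it.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/topology.h>

static struct sched_domain_topology_level my_arch_topology[] = {
#ifdef CONFIG_SCHED_SMT
	/* hardware threads of one core: share capacity and cache */
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	/* cores of one package: share package resources (e.g. LLC) */
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	/* all CPUs of the node/die */
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void __init my_arch_register_sched_topology(void)
{
	/* Replace the default table before sched domains are built. */
	set_sched_topology(my_arch_topology);
}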