Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h   54
1 file changed, 24 insertions, 30 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9f51932bd543..2c2e56bd8913 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,9 +25,11 @@
 #include <linux/resource.h>
 #include <linux/latencytop.h>
 #include <linux/sched/prio.h>
+#include <linux/sched/types.h>
 #include <linux/signal_types.h>
 #include <linux/mm_types_task.h>
 #include <linux/task_io_accounting.h>
+#include <linux/posix-timers.h>
 #include <linux/rseq.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -244,27 +246,6 @@ struct prev_cputime {
 #endif
 };
 
-/**
- * struct task_cputime - collected CPU time counts
- * @utime:		time spent in user mode, in nanoseconds
- * @stime:		time spent in kernel mode, in nanoseconds
- * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
- *
- * This structure groups together three kinds of CPU time that are tracked for
- * threads and thread groups.  Most things considering CPU time want to group
- * these counts together and treat all three of them in parallel.
- */
-struct task_cputime {
-	u64				utime;
-	u64				stime;
-	unsigned long long		sum_exec_runtime;
-};
-
-/* Alternate field names when used on cache expirations: */
-#define virt_exp			utime
-#define prof_exp			stime
-#define sched_exp			sum_exec_runtime
-
 enum vtime_state {
 	/* Task is sleeping or running in a CPU with VTIME inactive: */
 	VTIME_INACTIVE = 0,
@@ -295,6 +276,11 @@ enum uclamp_id {
 	UCLAMP_CNT
 };
 
+#ifdef CONFIG_SMP
+extern struct root_domain def_root_domain;
+extern struct mutex sched_domains_mutex;
+#endif
+
 struct sched_info {
 #ifdef CONFIG_SCHED_INFO
 	/* Cumulative counters: */
@@ -876,10 +862,8 @@ struct task_struct {
 	unsigned long			min_flt;
 	unsigned long			maj_flt;
 
-#ifdef CONFIG_POSIX_TIMERS
-	struct task_cputime		cputime_expires;
-	struct list_head		cpu_timers[3];
-#endif
+	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
+	struct posix_cputimers		posix_cputimers;
 
 	/* Process credentials: */
 
@@ -974,6 +958,10 @@ struct task_struct {
 	struct mutex_waiter		*blocked_on;
 #endif
 
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+	int				non_block_count;
+#endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int			irq_events;
 	unsigned long			hardirq_enable_ip;
@@ -1142,7 +1130,10 @@ struct task_struct {
 
 	struct tlbflush_unmap_batch	tlb_ubc;
 
-	struct rcu_head			rcu;
+	union {
+		refcount_t		rcu_users;
+		struct rcu_head		rcu;
+	};
 
 	/* Cache last used pipe for splice(): */
 	struct pipe_inode_info		*splice_pipe;
@@ -1767,7 +1758,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling.
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
@@ -1796,12 +1787,12 @@ static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
@@ -1851,7 +1842,10 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
  * running or not.
  */
 #ifndef vcpu_is_preempted
-# define vcpu_is_preempted(cpu)	false
+static inline bool vcpu_is_preempted(int cpu)
+{
+	return false;
+}
 #endif
 
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
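
A few of these hunks are worth unpacking. The task_cputime removal and the new posix_cputimers member are two halves of one cleanup: struct task_cputime itself moves into the newly included <linux/sched/types.h> rather than being deleted, while the expiry cache and the three parallel timer lists that POSIX CPU timers kept in task_struct (cputime_expires plus cpu_timers[3]) are replaced by a single container in which each clock carries its own state. That also retires the virt_exp/prof_exp/sched_exp aliases, which existed only to reuse task_cputime's fields as an expiry cache. A rough sketch of the container's shape, with a stand-in type so it compiles outside the kernel; the real definitions live in <linux/posix-timers.h> and may differ in detail:

struct timerqueue_head { void *rb_root; };	/* stand-in for <linux/timerqueue.h> */

/* One per clock (profiling, virtual, scheduler runtime). */
struct posix_cputimer_base {
	unsigned long long	nextevt;	/* earliest expiry for this clock */
	struct timerqueue_head	tqhead;		/* armed timers, sorted by expiry */
};

/* Single task_struct member replacing cputime_expires + cpu_timers[3]. */
struct posix_cputimers {
	struct posix_cputimer_base	bases[3];
};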
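
The rcu_users/rcu union is the subtlest hunk. task_struct previously embedded a plain rcu_head for its deferred free; the union overlays a reference count on the same storage. This is safe because the two members are never live at the same time: while references exist only rcu_users is touched, and once the count drops to zero nothing reads it again, so the same bytes can be handed to the RCU callback machinery. Below is a user-space sketch of the pattern; put_task(), fake_call_rcu() and delayed_put_task() are illustrative stand-ins rather than the kernel's functions, and a real call_rcu() defers the callback until a grace period has elapsed instead of running it inline:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Stand-in for the kernel's rcu_head; illustrative only. */
struct rcu_head {
	void (*func)(struct rcu_head *head);
};

struct task {
	int pid;
	union {
		int		rcu_users;	/* live while references exist   */
		struct rcu_head	rcu;		/* reused once rcu_users hits 0  */
	};
};

static void delayed_put_task(struct rcu_head *head)
{
	/* The kernel would use container_of(); the arithmetic is the same. */
	struct task *t = (struct task *)((char *)head - offsetof(struct task, rcu));
	printf("freeing task %d after grace period\n", t->pid);
	free(t);
}

/* Pretend grace period: invoke the callback immediately. */
static void fake_call_rcu(struct rcu_head *head,
			  void (*func)(struct rcu_head *head))
{
	head->func = func;
	head->func(head);
}

static void put_task(struct task *t)
{
	/* Nobody reads rcu_users again once it reaches zero, so the
	 * storage can be repurposed as the rcu_head. */
	if (--t->rcu_users == 0)
		fake_call_rcu(&t->rcu, delayed_put_task);
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));
	if (!t)
		return 1;
	t->pid = 42;
	t->rcu_users = 2;	/* e.g. one user plus one external holder */
	put_task(t);		/* 2 -> 1: still referenced, nothing happens */
	put_task(t);		/* 1 -> 0: deferred free runs */
	return 0;
}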
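
The CONFIG_PREEMPT to CONFIG_PREEMPTION switches follow the introduction of CONFIG_PREEMPT_RT: CONFIG_PREEMPTION is selected by both the classic preemption model and the RT model, so code that must cope with any preemptible kernel now keys off the common symbol. Finally, the vcpu_is_preempted() fallback becomes a static inline instead of a function-like macro. Behavior is unchanged (architectures without a paravirt implementation still get false), but the inline version type-checks its argument and counts as a use of it, where the macro discarded the argument at preprocessing time; the usual motivation for such conversions is that compilers otherwise warn about variables whose only use is being passed to the macro. A hypothetical caller, for illustration only:

/* With the old "# define vcpu_is_preempted(cpu) false", owner_cpu was
 * dropped before compilation, so nothing checked its type and some
 * compilers flagged it as set but unused; the static inline consumes it. */
static bool owner_vcpu_preempted(int owner_cpu)
{
	return vcpu_is_preempted(owner_cpu);
}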