Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	74
1 file changed, 49 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a837b20ba190..20b03bf94748 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -808,7 +808,7 @@ enum cpu_idle_type {
  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
  * increased costs.
  */
-#if BITS_PER_LONG > 32
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
 # define SCHED_LOAD_RESOLUTION	10
 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
@@ -844,6 +844,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
 	return 0;
 }
 
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
+struct sched_group_power {
 	atomic_t ref;
-
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power, cpu_power_orig;
+	unsigned int power, power_orig;
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
 	unsigned int group_weight;
+	struct sched_group_power *sgp;
 
 	/*
 	 * The CPUs this group covers.
@@ -1254,6 +1260,9 @@ struct task_struct {
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	char rcu_read_unlock_special;
+#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+	int rcu_boosted;
+#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
@@ -1283,7 +1292,7 @@ struct task_struct {
 	int exit_state;
 	int exit_code, exit_signal;
 	int pdeath_signal;  /*  The signal sent when the parent dies  */
-	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
+	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
@@ -1503,7 +1512,6 @@ struct task_struct {
 	short il_next;
 	short pref_node_fork;
 #endif
-	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
 	/*
@@ -1804,15 +1812,34 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define used_math() tsk_used_math(current)
 
 /*
- * task->group_stop flags
+ * task->jobctl flags
  */
-#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
-#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
-#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
-#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
-#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */
-
-extern void task_clear_group_stop_pending(struct task_struct *task);
+#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
+
+#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
+#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
+#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
+#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
+#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
+#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
+#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
+
+#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
+
+#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+
+extern bool task_set_jobctl_pending(struct task_struct *task,
+				    unsigned int mask);
+extern void task_clear_jobctl_trapping(struct task_struct *task);
+extern void task_clear_jobctl_pending(struct task_struct *task,
+				      unsigned int mask);
 
 #ifdef CONFIG_PREEMPT_RCU
 
@@ -2127,7 +2154,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 
 	return ret;
-}	
+}
 
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 			      sigset_t *mask);
@@ -2142,7 +2169,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int do_notify_parent(struct task_struct *, int);
+extern __must_check bool do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
@@ -2266,8 +2293,10 @@ static inline int get_nr_threads(struct task_struct *tsk)
 	return tsk->signal->nr_threads;
 }
 
-/* de_thread depends on thread_group_leader not being a pid based check */
-#define thread_group_leader(p)	(p == p->group_leader)
+static inline bool thread_group_leader(struct task_struct *p)
+{
+	return p->exit_signal >= 0;
+}
 
 /* Do to the insanities of de_thread it is possible for a process
  * to have the pid of the thread group leader without actually being
@@ -2300,11 +2329,6 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
 		(thread_group_leader(p) && !thread_group_empty(p))
 
-static inline int task_detached(struct task_struct *p)
-{
-	return p->exit_signal == -1;
-}
-
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
@@ -2501,7 +2525,7 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET	0
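
The first hunk gates the higher fixed-point load resolution behind #if 0. As a rough illustration of what scale_load()/scale_load_down() buy when enabled, here is a minimal userspace sketch (all demo_* names are hypothetical, not kernel code): carrying 10 extra fractional bits inside the scheduler reduces rounding loss when a weight is divided up, at the cost of wider arithmetic.

#include <stdio.h>

#define DEMO_LOAD_RESOLUTION	10
#define demo_scale_load(w)	((unsigned long)(w) << DEMO_LOAD_RESOLUTION)
#define demo_scale_load_down(w)	((unsigned long)(w) >> DEMO_LOAD_RESOLUTION)

int main(void)
{
	unsigned long w = 1024;	/* the NICE_0 task weight */

	/* Without scaling, a three-way split drops the remainder entirely. */
	printf("unscaled third: %lu\n", w / 3);

	/* With scaling, ~10 fractional bits survive the division internally. */
	unsigned long third = demo_scale_load(w) / 3;
	printf("scaled third: %lu (user-visible %lu)\n",
	       third, demo_scale_load_down(third));
	return 0;
}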
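
The sched_group hunk splits the power fields into a separate refcounted struct sched_group_power reached through sg->sgp, which, together with the new SD_OVERLAP flag, allows several groups of overlapping domains to share one power structure instead of each carrying its own cpu_power copy. A standalone sketch of that shape, with hypothetical demo_* types:

#include <stdio.h>

struct demo_group_power {
	int ref;			/* stands in for the kernel's atomic_t ref */
	unsigned int power;
};

struct demo_group {
	struct demo_group *next;	/* must be a circular list */
	struct demo_group_power *sgp;	/* shared, no longer embedded per group */
};

int main(void)
{
	struct demo_group_power shared = { .ref = 2, .power = 1024 };
	struct demo_group a = { .sgp = &shared };
	struct demo_group b = { .sgp = &shared };

	a.next = &b;
	b.next = &a;

	/* One update is observed through every group sharing the structure. */
	shared.power = 2048;
	printf("a sees %u, b sees %u\n", a.sgp->power, b.sgp->power);
	return 0;
}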
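
The GROUP_STOP_* to JOBCTL_* hunk also changes how the flags are defined: each condition first gets a *_BIT number, the flag is derived from that bit, and composite masks (JOBCTL_TRAP_MASK, JOBCTL_PENDING_MASK) group related bits so one test covers several conditions. A self-contained userspace sketch of the same pattern (demo_* names are hypothetical; only the shape mirrors task_set_jobctl_pending()):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_STOP_PENDING_BIT	17
#define DEMO_TRAP_STOP_BIT	19
#define DEMO_TRAP_NOTIFY_BIT	20

#define DEMO_STOP_PENDING	(1 << DEMO_STOP_PENDING_BIT)
#define DEMO_TRAP_STOP		(1 << DEMO_TRAP_STOP_BIT)
#define DEMO_TRAP_NOTIFY	(1 << DEMO_TRAP_NOTIFY_BIT)

#define DEMO_TRAP_MASK		(DEMO_TRAP_STOP | DEMO_TRAP_NOTIFY)
#define DEMO_PENDING_MASK	(DEMO_STOP_PENDING | DEMO_TRAP_MASK)

struct demo_task {
	unsigned int jobctl;
};

/* Mirrors the shape of task_set_jobctl_pending(): set bits, report success. */
static bool demo_set_pending(struct demo_task *t, unsigned int mask)
{
	t->jobctl |= mask;
	return true;
}

int main(void)
{
	struct demo_task t = { .jobctl = 0 };

	demo_set_pending(&t, DEMO_TRAP_STOP);
	/* One branch covers the stop condition and both trap conditions. */
	if (t.jobctl & DEMO_PENDING_MASK)
		printf("work pending: jobctl=%#x\n", t.jobctl);
	return 0;
}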
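
Finally, thread_group_leader() stops comparing task pointers and instead tests exit_signal >= 0, and task_detached() (exit_signal == -1) disappears: the same field now appears to encode both facts, with only the group leader keeping a non-negative exit_signal. A tiny userspace sketch of that invariant (demo_* names are hypothetical, and the assumption that sub-threads carry exit_signal == -1 is inferred from the removed task_detached()):

#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_task {
	int exit_signal;
};

/* Same test as the new inline above, on the demo type. */
static bool demo_thread_group_leader(const struct demo_task *p)
{
	return p->exit_signal >= 0;
}

int main(void)
{
	struct demo_task leader = { .exit_signal = SIGCHLD };	/* leader keeps a signal */
	struct demo_task worker = { .exit_signal = -1 };	/* sub-thread: detached */

	printf("leader? %d, worker? %d\n",
	       demo_thread_group_leader(&leader),
	       demo_thread_group_leader(&worker));
	return 0;
}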