Diffstat (limited to 'kernel/signal.c')
-rw-r--r--	kernel/signal.c	109
1 file changed, 42 insertions(+), 67 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 952741f6d0f9..6f3476dc7873 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -426,22 +426,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	 */
 	rcu_read_lock();
 	ucounts = task_ucounts(t);
-	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-	switch (sigpending) {
-	case 1:
-		if (likely(get_ucounts(ucounts)))
-			break;
-		fallthrough;
-	case LONG_MAX:
-		/*
-		 * we need to decrease the ucount in the userns tree on any
-		 * failure to avoid counts leaking.
-		 */
-		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-		rcu_read_unlock();
-		return NULL;
-	}
+	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 	rcu_read_unlock();
+	if (!sigpending)
+		return NULL;
 
 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
 		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
@@ -450,8 +438,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	}
 
 	if (unlikely(q == NULL)) {
-		if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
-			put_ucounts(ucounts);
+		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 	} else {
 		INIT_LIST_HEAD(&q->list);
 		q->flags = sigqueue_flags;
@@ -464,8 +451,8 @@ static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
-	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
-		put_ucounts(q->ucounts);
+	if (q->ucounts) {
+		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
 		q->ucounts = NULL;
 	}
 	kmem_cache_free(sigqueue_cachep, q);
@@ -2158,40 +2145,6 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
-static inline bool may_ptrace_stop(void)
-{
-	if (!likely(current->ptrace))
-		return false;
-	/*
-	 * Are we in the middle of do_coredump?
-	 * If so and our tracer is also part of the coredump stopping
-	 * is a deadlock situation, and pointless because our tracer
-	 * is dead so don't allow us to stop.
-	 * If SIGKILL was already sent before the caller unlocked
-	 * ->siglock we must see ->core_state != NULL. Otherwise it
-	 * is safe to enter schedule().
-	 *
-	 * This is almost outdated, a task with the pending SIGKILL can't
-	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
-	 * after SIGKILL was already dequeued.
-	 */
-	if (unlikely(current->mm->core_state) &&
-	    unlikely(current->mm == current->parent->mm))
-		return false;
-
-	return true;
-}
-
-/*
- * Return non-zero if there is a SIGKILL that should be waking us up.
- * Called with the siglock held.
- */
-static bool sigkill_pending(struct task_struct *tsk)
-{
-	return sigismember(&tsk->pending.signal, SIGKILL) ||
-	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
-}
-
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -2209,7 +2162,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
 {
 	bool gstop_done = false;
 
-	if (arch_ptrace_stop_needed(exit_code, info)) {
+	if (arch_ptrace_stop_needed()) {
 		/*
 		 * The arch code has something special to do before a
 		 * ptrace stop.  This is allowed to block, e.g. for faults
@@ -2217,17 +2170,16 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
 		 * calling arch_ptrace_stop, so we must release it now.
 		 * To preserve proper semantics, we must do this before
 		 * any signal bookkeeping like checking group_stop_count.
-		 * Meanwhile, a SIGKILL could come in before we retake the
-		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
-		 * So after regaining the lock, we must check for SIGKILL.
 		 */
 		spin_unlock_irq(&current->sighand->siglock);
-		arch_ptrace_stop(exit_code, info);
+		arch_ptrace_stop();
 		spin_lock_irq(&current->sighand->siglock);
-		if (sigkill_pending(current))
-			return;
 	}
 
+	/*
+	 * schedule() will not sleep if there is a pending signal that
+	 * can awaken the task.
+	 */
 	set_special_state(TASK_TRACED);
 
 	/*
@@ -2273,7 +2225,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
 
 	spin_unlock_irq(&current->sighand->siglock);
 	read_lock(&tasklist_lock);
-	if (may_ptrace_stop()) {
+	if (likely(current->ptrace)) {
 		/*
 		 * Notify parents of the stop.
 		 *
@@ -4151,11 +4103,29 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 	return 0;
 }
 
+#ifdef CONFIG_DYNAMIC_SIGFRAME
+static inline void sigaltstack_lock(void)
+	__acquires(&current->sighand->siglock)
+{
+	spin_lock_irq(&current->sighand->siglock);
+}
+
+static inline void sigaltstack_unlock(void)
+	__releases(&current->sighand->siglock)
+{
+	spin_unlock_irq(&current->sighand->siglock);
+}
+#else
+static inline void sigaltstack_lock(void) { }
+static inline void sigaltstack_unlock(void) { }
+#endif
+
 static int
 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
 		size_t min_ss_size)
 {
 	struct task_struct *t = current;
+	int ret = 0;
 
 	if (oss) {
 		memset(oss, 0, sizeof(stack_t));
@@ -4179,19 +4149,24 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
 				ss_mode != 0))
 			return -EINVAL;
 
+		sigaltstack_lock();
 		if (ss_mode == SS_DISABLE) {
 			ss_size = 0;
 			ss_sp = NULL;
 		} else {
 			if (unlikely(ss_size < min_ss_size))
-				return -ENOMEM;
+				ret = -ENOMEM;
+			if (!sigaltstack_size_valid(ss_size))
+				ret = -ENOMEM;
 		}
-
-		t->sas_ss_sp = (unsigned long) ss_sp;
-		t->sas_ss_size = ss_size;
-		t->sas_ss_flags = ss_flags;
+		if (!ret) {
+			t->sas_ss_sp = (unsigned long) ss_sp;
+			t->sas_ss_size = ss_size;
+			t->sas_ss_flags = ss_flags;
+		}
+		sigaltstack_unlock();
 	}
-	return 0;
+	return ret;
 }
 
 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
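The first two hunks replace the open-coded "bump the SIGPENDING rlimit count, then take a ucounts reference, and roll both back on failure" sequence with a single inc_rlimit_get_ucounts()/dec_rlimit_put_ucounts() pair, as the new call sites in __sigqueue_alloc() and __sigqueue_free() show. The following is a minimal, hypothetical user-space sketch of that charge/uncharge pattern only; the fake_* names, the simplified single-level counter, and the malloc() stand-in for the sigqueue slab are illustrative assumptions, not the kernel helpers (which operate on struct ucounts across the user-namespace hierarchy with atomics and RCU).

/* Sketch of the charge-and-reference pattern the diff switches to. */
#include <stdio.h>
#include <stdlib.h>

struct fake_ucounts {
	long sigpending;	/* models the UCOUNT_RLIMIT_SIGPENDING count */
	long refcount;		/* models the ucounts reference count */
	long max;		/* models the rlimit ceiling */
};

/* Charge one pending signal and take a reference in one step.
 * Returns the new pending count on success, 0 on failure with the
 * count already rolled back, so the caller has nothing to undo. */
static long fake_inc_rlimit_get_ucounts(struct fake_ucounts *uc)
{
	long pending = ++uc->sigpending;

	if (pending > uc->max) {
		uc->sigpending--;	/* over the limit: undo and fail */
		return 0;
	}
	uc->refcount++;			/* one reference per charged signal */
	return pending;
}

/* Undo both the charge and the reference, as one operation. */
static void fake_dec_rlimit_put_ucounts(struct fake_ucounts *uc)
{
	uc->sigpending--;
	uc->refcount--;
}

int main(void)
{
	struct fake_ucounts uc = { .max = 2 };
	void *q[3];

	for (int i = 0; i < 3; i++) {
		/* Mirrors the new __sigqueue_alloc() flow. */
		long pending = fake_inc_rlimit_get_ucounts(&uc);

		if (!pending) {
			q[i] = NULL;	/* charge failed, nothing to undo */
			continue;
		}
		q[i] = malloc(16);	/* stands in for kmem_cache_alloc() */
		if (!q[i])
			fake_dec_rlimit_put_ucounts(&uc);	/* alloc failed: uncharge */
	}
	printf("after alloc: pending=%ld refs=%ld\n", uc.sigpending, uc.refcount);

	for (int i = 0; i < 3; i++) {
		if (q[i]) {		/* mirrors __sigqueue_free() */
			fake_dec_rlimit_put_ucounts(&uc);
			free(q[i]);
		}
	}
	printf("after free:  pending=%ld refs=%ld\n", uc.sigpending, uc.refcount);
	return 0;
}

With the combined helpers, every error path in the diff collapses to a single call, which is why the switch statement and the "counts leaking" comment disappear from __sigqueue_alloc().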