Diffstat (limited to 'kernel/signal.c')
 -rw-r--r--  kernel/signal.c | 268
1 file changed, 155 insertions, 113 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 8d8a940422a8..5843c541fda9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -65,14 +65,14 @@ static void __user *sig_handler(struct task_struct *t, int sig)
 	return t->sighand->action[sig - 1].sa.sa_handler;
 }
 
-static int sig_handler_ignored(void __user *handler, int sig)
+static inline bool sig_handler_ignored(void __user *handler, int sig)
 {
 	/* Is it explicitly or implicitly ignored? */
 	return handler == SIG_IGN ||
-		(handler == SIG_DFL && sig_kernel_ignore(sig));
+	       (handler == SIG_DFL && sig_kernel_ignore(sig));
 }
 
-static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
 	void __user *handler;
 
@@ -80,12 +80,12 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 
 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
-		return 1;
+		return true;
 
 	return sig_handler_ignored(handler, sig);
 }
 
-static int sig_ignored(struct task_struct *t, int sig, bool force)
+static bool sig_ignored(struct task_struct *t, int sig, bool force)
 {
 	/*
 	 * Blocked signals are never ignored, since the
@@ -93,7 +93,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
 	 * unblocked.
 	 */
 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
-		return 0;
+		return false;
 
 	/*
 	 * Tracers may want to know about even ignored signal unless it
@@ -101,7 +101,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
 	 * by SIGNAL_UNKILLABLE task.
 	 */
 	if (t->ptrace && sig != SIGKILL)
-		return 0;
+		return false;
 
 	return sig_task_ignored(t, sig, force);
 }
@@ -110,7 +110,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
  * Re-calculate pending state from the set of locally pending
  * signals, globally pending signals, and blocked signals.
  */
-static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
+static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 {
 	unsigned long ready;
 	long i;
@@ -138,20 +138,21 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 
-static int recalc_sigpending_tsk(struct task_struct *t)
+static bool recalc_sigpending_tsk(struct task_struct *t)
 {
 	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
-		return 1;
+		return true;
 	}
+
 	/*
 	 * We must never clear the flag in another thread, or in current
 	 * when it's possible the current syscall is returning -ERESTART*.
 	 * So we don't clear it here, and only callers who know they should do.
 	 */
-	return 0;
+	return false;
 }
 
 /*
@@ -172,6 +173,17 @@ void recalc_sigpending(void)
 
 }
 
+void calculate_sigpending(void)
+{
+	/* Have any signals or users of TIF_SIGPENDING been delayed
+	 * until after fork?
+	 */
+	spin_lock_irq(&current->sighand->siglock);
+	set_tsk_thread_flag(current, TIF_SIGPENDING);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
 /* Given the mask, find the first available signal that should be serviced. */
 
 #define SYNCHRONOUS_MASK \
@@ -362,6 +374,20 @@ static bool task_participate_group_stop(struct task_struct *task)
 	return false;
 }
 
+void task_join_group_stop(struct task_struct *task)
+{
+	/* Have the new thread join an on-going signal group stop */
+	unsigned long jobctl = current->jobctl;
+	if (jobctl & JOBCTL_STOP_PENDING) {
+		struct signal_struct *sig = current->signal;
+		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
+		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
+		if (task_set_jobctl_pending(task, signr | gstop)) {
+			sig->group_stop_count++;
+		}
+	}
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
@@ -504,13 +530,15 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 	}
 }
 
-int unhandled_signal(struct task_struct *tsk, int sig)
+bool unhandled_signal(struct task_struct *tsk, int sig)
 {
 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 	if (is_global_init(tsk))
-		return 1;
+		return true;
+
 	if (handler != SIG_IGN && handler != SIG_DFL)
-		return 0;
+		return false;
+
 	/* if ptraced, let the tracer determine */
 	return !tsk->ptrace;
 }
@@ -684,14 +712,14 @@ void signal_wake_up_state(struct task_struct *t, unsigned int state)
  *
  * All callers must be holding the siglock.
  */
-static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
+static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 {
 	struct sigqueue *q, *n;
 	sigset_t m;
 
 	sigandsets(&m, mask, &s->signal);
 	if (sigisemptyset(&m))
-		return 0;
+		return;
 
 	sigandnsets(&s->signal, &s->signal, mask);
 	list_for_each_entry_safe(q, n, &s->list, list) {
@@ -700,7 +728,6 @@ static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 			__sigqueue_free(q);
 		}
 	}
-	return 1;
 }
 
 static inline int is_si_special(const struct siginfo *info)
@@ -717,21 +744,16 @@ static inline bool si_fromuser(const struct siginfo *info)
 /*
  * called with RCU read lock from check_kill_permission()
  */
-static int kill_ok_by_cred(struct task_struct *t)
+static bool kill_ok_by_cred(struct task_struct *t)
 {
 	const struct cred *cred = current_cred();
 	const struct cred *tcred = __task_cred(t);
 
-	if (uid_eq(cred->euid, tcred->suid) ||
-	    uid_eq(cred->euid, tcred->uid)  ||
-	    uid_eq(cred->uid,  tcred->suid) ||
-	    uid_eq(cred->uid,  tcred->uid))
-		return 1;
-
-	if (ns_capable(tcred->user_ns, CAP_KILL))
-		return 1;
-
-	return 0;
+	return uid_eq(cred->euid, tcred->suid) ||
+	       uid_eq(cred->euid, tcred->uid) ||
+	       uid_eq(cred->uid, tcred->suid) ||
+	       uid_eq(cred->uid, tcred->uid) ||
+	       ns_capable(tcred->user_ns, CAP_KILL);
 }
 
 /*
@@ -882,20 +904,24 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-static inline int wants_signal(int sig, struct task_struct *p)
+static inline bool wants_signal(int sig, struct task_struct *p)
 {
 	if (sigismember(&p->blocked, sig))
-		return 0;
+		return false;
+
 	if (p->flags & PF_EXITING)
-		return 0;
+		return false;
+
 	if (sig == SIGKILL)
-		return 1;
+		return true;
+
 	if (task_is_stopped_or_traced(p))
-		return 0;
+		return false;
+
 	return task_curr(p) || !signal_pending(p);
 }
 
-static void complete_signal(int sig, struct task_struct *p, int group)
+static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 {
 	struct signal_struct *signal = p->signal;
 	struct task_struct *t;
@@ -908,7 +934,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 	 */
 	if (wants_signal(sig, p))
 		t = p;
-	else if (!group || thread_group_empty(p))
+	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
 		/*
 		 * There is just one thread and it does not need to be woken.
 		 * It will dequeue unblocked signals before it runs again.
@@ -971,7 +997,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 	return;
 }
 
-static inline int legacy_queue(struct sigpending *signals, int sig)
+static inline bool legacy_queue(struct sigpending *signals, int sig)
 {
 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
@@ -998,7 +1024,7 @@ static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_str
 #endif
 
 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
-			int group, int from_ancestor_ns)
+			enum pid_type type, int from_ancestor_ns)
 {
 	struct sigpending *pending;
 	struct sigqueue *q;
@@ -1012,7 +1038,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 			from_ancestor_ns || (info == SEND_SIG_FORCED)))
 		goto ret;
 
-	pending = group ? &t->signal->shared_pending : &t->pending;
+	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
 	/*
 	 * Short-circuit ignored signals and support queuing
 	 * exactly one non-rt signal, so that we can get more
@@ -1096,14 +1122,29 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 out_set:
 	signalfd_notify(t, sig);
 	sigaddset(&pending->signal, sig);
-	complete_signal(sig, t, group);
+
+	/* Let multiprocess signals appear after on-going forks */
+	if (type > PIDTYPE_TGID) {
+		struct multiprocess_signals *delayed;
+		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
+			sigset_t *signal = &delayed->signal;
+			/* Can't queue both a stop and a continue signal */
+			if (sig == SIGCONT)
+				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
+			else if (sig_kernel_stop(sig))
+				sigdelset(signal, SIGCONT);
+			sigaddset(signal, sig);
+		}
+	}
+
+	complete_signal(sig, t, type);
 ret:
-	trace_signal_generate(sig, info, t, group, result);
+	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
 	return ret;
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-			int group)
+			enum pid_type type)
 {
 	int from_ancestor_ns = 0;
 
@@ -1112,7 +1153,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
 #endif
 
-	return __send_signal(sig, info, t, group, from_ancestor_ns);
+	return __send_signal(sig, info, t, type, from_ancestor_ns);
 }
 
 static void print_fatal_signal(int signr)
@@ -1151,23 +1192,23 @@ __setup("print-fatal-signals=", setup_print_fatal_signals);
 int
 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-	return send_signal(sig, info, p, 1);
+	return send_signal(sig, info, p, PIDTYPE_TGID);
 }
 
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-	return send_signal(sig, info, t, 0);
+	return send_signal(sig, info, t, PIDTYPE_PID);
 }
 
 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
-			bool group)
+			enum pid_type type)
 {
 	unsigned long flags;
 	int ret = -ESRCH;
 
 	if (lock_task_sighand(p, &flags)) {
-		ret = send_signal(sig, info, p, group);
+		ret = send_signal(sig, info, p, type);
 		unlock_task_sighand(p, &flags);
 	}
 
@@ -1274,7 +1315,8 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 /*
  * send signal info to all the members of a group
  */
-int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+			enum pid_type type)
 {
 	int ret;
 
@@ -1283,7 +1325,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 	rcu_read_unlock();
 
 	if (!ret && sig)
-		ret = do_send_sig_info(sig, info, p, true);
+		ret = do_send_sig_info(sig, info, p, type);
 
 	return ret;
 }
@@ -1301,7 +1343,7 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
 	success = 0;
 	retval = -ESRCH;
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		int err = group_send_sig_info(sig, info, p);
+		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
 		success |= !err;
 		retval = err;
 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
@@ -1317,7 +1359,7 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 		rcu_read_lock();
 		p = pid_task(pid, PIDTYPE_PID);
 		if (p)
-			error = group_send_sig_info(sig, info, p);
+			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
 		rcu_read_unlock();
 		if (likely(!p || error != -ESRCH))
 			return error;
@@ -1339,14 +1381,15 @@ static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 	return error;
 }
 
-static int kill_as_cred_perm(const struct cred *cred,
-			     struct task_struct *target)
+static inline bool kill_as_cred_perm(const struct cred *cred,
+				     struct task_struct *target)
 {
 	const struct cred *pcred = __task_cred(target);
-	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
-	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
-		return 0;
-	return 1;
+
+	return uid_eq(cred->euid, pcred->suid) ||
+	       uid_eq(cred->euid, pcred->uid) ||
+	       uid_eq(cred->uid, pcred->suid) ||
+	       uid_eq(cred->uid, pcred->uid);
 }
 
 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
@@ -1376,7 +1419,7 @@ int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
 
 	if (sig) {
 		if (lock_task_sighand(p, &flags)) {
-			ret = __send_signal(sig, info, p, 1, 0);
+			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
 			unlock_task_sighand(p, &flags);
 		} else
 			ret = -ESRCH;
@@ -1420,7 +1463,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
 		for_each_process(p) {
 			if (task_pid_vnr(p) > 1 &&
 					!same_thread_group(p, current)) {
-				int err = group_send_sig_info(sig, info, p);
+				int err = group_send_sig_info(sig, info, p,
+							      PIDTYPE_MAX);
 				++count;
 				if (err != -EPERM)
 					retval = err;
@@ -1446,7 +1490,7 @@ int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 	if (!valid_signal(sig))
 		return -EINVAL;
 
-	return do_send_sig_info(sig, info, p, false);
+	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
 }
 
 #define __si_special(priv) \
@@ -1458,8 +1502,7 @@ send_sig(int sig, struct task_struct *p, int priv)
 	return send_sig_info(sig, __si_special(priv), p);
 }
 
-void
-force_sig(int sig, struct task_struct *p)
+void force_sig(int sig, struct task_struct *p)
 {
 	force_sig_info(sig, SEND_SIG_PRIV, p);
 }
@@ -1470,8 +1513,7 @@ force_sig(int sig, struct task_struct *p)
  * the problem was already a SIGSEGV, we'll want to
  * make sure we don't even try to deliver the signal..
  */
-int
-force_sigsegv(int sig, struct task_struct *p)
+void force_sigsegv(int sig, struct task_struct *p)
 {
 	if (sig == SIGSEGV) {
 		unsigned long flags;
@@ -1480,7 +1522,6 @@ force_sigsegv(int sig, struct task_struct *p)
 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	}
 	force_sig(SIGSEGV, p);
-	return 0;
 }
 
 int force_sig_fault(int sig, int code, void __user *addr
@@ -1664,17 +1705,20 @@ void sigqueue_free(struct sigqueue *q)
 		__sigqueue_free(q);
 }
 
-int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
+int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
 {
 	int sig = q->info.si_signo;
 	struct sigpending *pending;
+	struct task_struct *t;
 	unsigned long flags;
 	int ret, result;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
 	ret = -1;
-	if (!likely(lock_task_sighand(t, &flags)))
+	rcu_read_lock();
+	t = pid_task(pid, type);
+	if (!t || !likely(lock_task_sighand(t, &flags)))
 		goto ret;
 
 	ret = 1; /* the signal is ignored */
@@ -1696,15 +1740,16 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 	q->info.si_overrun = 0;
 
 	signalfd_notify(t, sig);
-	pending = group ? &t->signal->shared_pending : &t->pending;
+	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
 	list_add_tail(&q->list, &pending->list);
 	sigaddset(&pending->signal, sig);
-	complete_signal(sig, t, group);
+	complete_signal(sig, t, type);
 	result = TRACE_SIGNAL_DELIVERED;
 out:
-	trace_signal_generate(sig, &q->info, t, group, result);
+	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
 	unlock_task_sighand(t, &flags);
 ret:
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -1877,10 +1922,10 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
-static inline int may_ptrace_stop(void)
+static inline bool may_ptrace_stop(void)
 {
 	if (!likely(current->ptrace))
-		return 0;
+		return false;
 	/*
 	 * Are we in the middle of do_coredump?
 	 * If so and our tracer is also part of the coredump stopping
@@ -1896,19 +1941,19 @@ static inline int may_ptrace_stop(void)
 	 */
 	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
  * Return non-zero if there is a SIGKILL that should be waking us up.
  * Called with the siglock held.
  */
-static int sigkill_pending(struct task_struct *tsk)
+static bool sigkill_pending(struct task_struct *tsk)
 {
-	return	sigismember(&tsk->pending.signal, SIGKILL) ||
-		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
+	return sigismember(&tsk->pending.signal, SIGKILL) ||
+	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
 }
 
 /*
@@ -2288,7 +2333,7 @@ static int ptrace_signal(int signr, siginfo_t *info)
 	return signr;
 }
 
-int get_signal(struct ksignal *ksig)
+bool get_signal(struct ksignal *ksig)
 {
 	struct sighand_struct *sighand = current->sighand;
 	struct signal_struct *signal = current->signal;
@@ -2298,7 +2343,7 @@ int get_signal(struct ksignal *ksig)
 		task_work_run();
 
 	if (unlikely(uprobe_deny_signal()))
-		return 0;
+		return false;
 
 	/*
 	 * Do this once, we can't return to user-mode if freezing() == T.
@@ -2755,7 +2800,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
 }
 #endif
 
-static int do_sigpending(sigset_t *set)
+static void do_sigpending(sigset_t *set)
 {
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(set, &current->pending.signal,
@@ -2764,7 +2809,6 @@ static int do_sigpending(sigset_t *set)
 
 	/* Outside the lock because only this thread touches it.  */
 	sigandsets(set, &current->blocked, set);
-	return 0;
 }
 
 /**
@@ -2776,15 +2820,16 @@ static int do_sigpending(sigset_t *set)
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
 {
 	sigset_t set;
-	int err;
 
 	if (sigsetsize > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err && copy_to_user(uset, &set, sigsetsize))
-		err = -EFAULT;
-	return err;
+	do_sigpending(&set);
+
+	if (copy_to_user(uset, &set, sigsetsize))
+		return -EFAULT;
+
+	return 0;
 }
 
 #ifdef CONFIG_COMPAT
@@ -2792,15 +2837,13 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
 		compat_size_t, sigsetsize)
 {
 	sigset_t set;
-	int err;
 
 	if (sigsetsize > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err)
-		err = put_compat_sigset(uset, &set, sigsetsize);
-	return err;
+	do_sigpending(&set);
+
+	return put_compat_sigset(uset, &set, sigsetsize);
 }
 #endif
 
@@ -3193,7 +3236,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
 		 * probe.  No signal is actually delivered.
 		 */
 		if (!error && sig) {
-			error = do_send_sig_info(sig, info, p, false);
+			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
 			/*
 			 * If lock_task_sighand() failed we pretend the task
 			 * dies after receiving the signal. The window is tiny,
@@ -3562,25 +3605,26 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
 {
 	sigset_t set;
-	int err;
 
 	if (sizeof(old_sigset_t) > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t)))
-		err = -EFAULT;
-	return err;
+	do_sigpending(&set);
+
+	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
+		return -EFAULT;
+
+	return 0;
 }
 
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
 {
 	sigset_t set;
-	int err = do_sigpending(&set);
-	if (!err)
-		err = put_user(set.sig[0], set32);
-	return err;
+
+	do_sigpending(&set);
+
+	return put_user(set.sig[0], set32);
 }
 #endif
 
@@ -3651,25 +3695,23 @@ SYSCALL_DEFINE4(rt_sigaction, int, sig,
 		size_t, sigsetsize)
 {
 	struct k_sigaction new_sa, old_sa;
-	int ret = -EINVAL;
+	int ret;
 
 	/* XXX: Don't preclude handling different sized sigset_t's.  */
 	if (sigsetsize != sizeof(sigset_t))
-		goto out;
+		return -EINVAL;
 
-	if (act) {
-		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
-			return -EFAULT;
-	}
+	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
+		return -EFAULT;
 
 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
+	if (ret)
+		return ret;
 
-	if (!ret && oact) {
-		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
-			return -EFAULT;
-	}
-out:
-	return ret;
+	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
+		return -EFAULT;
+
+	return 0;
 }
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
@@ -3960,7 +4002,7 @@ void kdb_send_sig(struct task_struct *t, int sig)
 			   "the deadlock.\n");
 		return;
 	}
-	ret = send_signal(sig, SEND_SIG_PRIV, t, false);
+	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
 	spin_unlock(&t->sighand->siglock);
 	if (ret)
 		kdb_printf("Fail to deliver Signal %d to process %d.\n",