Diffstat (limited to 'kernel/exit.c')
-rw-r--r--	kernel/exit.c	79
1 file changed, 43 insertions(+), 36 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 91a43e57a32e..f702a6a63686 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -48,7 +48,6 @@
 #include <linux/pipe_fs_i.h>
 #include <linux/audit.h> /* for audit_free() */
 #include <linux/resource.h>
-#include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/tracehook.h>
 #include <linux/fs_struct.h>
@@ -64,6 +63,7 @@
 #include <linux/rcuwait.h>
 #include <linux/compat.h>
 #include <linux/io_uring.h>
+#include <linux/kprobes.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -168,6 +168,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
 	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
+	kprobe_flush_task(tsk);
 	perf_event_delayed_put(tsk);
 	trace_sched_process_free(tsk);
 	put_task_struct(tsk);
@@ -339,6 +340,46 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
 
+static void coredump_task_exit(struct task_struct *tsk)
+{
+	struct core_state *core_state;
+
+	/*
+	 * Serialize with any possible pending coredump.
+	 * We must hold siglock around checking core_state
+	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
+	 * will increment ->nr_threads for each thread in the
+	 * group without PF_POSTCOREDUMP set.
+	 */
+	spin_lock_irq(&tsk->sighand->siglock);
+	tsk->flags |= PF_POSTCOREDUMP;
+	core_state = tsk->signal->core_state;
+	spin_unlock_irq(&tsk->sighand->siglock);
+	if (core_state) {
+		struct core_thread self;
+
+		self.task = current;
+		if (self.task->flags & PF_SIGNALED)
+			self.next = xchg(&core_state->dumper.next, &self);
+		else
+			self.task = NULL;
+		/*
+		 * Implies mb(), the result of xchg() must be visible
+		 * to core_state->dumper.
+		 */
+		if (atomic_dec_and_test(&core_state->nr_threads))
+			complete(&core_state->startup);
+
+		for (;;) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (!self.task) /* see coredump_finish() */
+				break;
+			freezable_schedule();
+		}
+		__set_current_state(TASK_RUNNING);
+	}
+}
+
 #ifdef CONFIG_MEMCG
 /*
  * A task is exiting.   If it owned this mm, find a new owner for the mm.
@@ -434,47 +475,12 @@ assign_new_owner:
 static void exit_mm(void)
 {
 	struct mm_struct *mm = current->mm;
-	struct core_state *core_state;
 
 	exit_mm_release(current, mm);
 	if (!mm)
 		return;
 	sync_mm_rss(mm);
-	/*
-	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_lock around checking core_state
-	 * and clearing tsk->mm.  The core-inducing thread
-	 * will increment ->nr_threads for each thread in the
-	 * group with ->mm != NULL.
-	 */
 	mmap_read_lock(mm);
-	core_state = mm->core_state;
-	if (core_state) {
-		struct core_thread self;
-
-		mmap_read_unlock(mm);
-
-		self.task = current;
-		if (self.task->flags & PF_SIGNALED)
-			self.next = xchg(&core_state->dumper.next, &self);
-		else
-			self.task = NULL;
-		/*
-		 * Implies mb(), the result of xchg() must be visible
-		 * to core_state->dumper.
-		 */
-		if (atomic_dec_and_test(&core_state->nr_threads))
-			complete(&core_state->startup);
-
-		for (;;) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (!self.task) /* see coredump_finish() */
-				break;
-			freezable_schedule();
-		}
-		__set_current_state(TASK_RUNNING);
-		mmap_read_lock(mm);
-	}
 	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
@@ -762,6 +768,7 @@ void __noreturn do_exit(long code)
 	profile_task_exit(tsk);
 	kcov_task_exit(tsk);
 
+	coredump_task_exit(tsk);
 	ptrace_event(PTRACE_EVENT_EXIT, code);
 	validate_creds_for_do_exit(tsk);
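For reference, the handshake the new coredump_task_exit() sets up can be modeled in userspace. The sketch below is an illustrative analogue only, assuming C11 atomics and pthreads as stand-ins for xchg(), complete()/wait_for_completion(), and the scheduler calls; the struct and function names mirror the kernel's but are hypothetical here, not the kernel APIs.

/*
 * Userspace analogue of the core_state handshake above: exiting
 * threads push themselves onto a lock-free list, the last one in
 * wakes the dumper, and everyone sleeps until the dumper (playing
 * coredump_finish()) clears their flag.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_EXITING 4

struct core_thread {
	struct core_thread *next;
	atomic_int waiting;		/* stands in for self.task */
};

struct core_state {
	_Atomic(struct core_thread *) dumper_next;
	atomic_int nr_threads;
	pthread_mutex_t lock;
	pthread_cond_t startup;		/* complete(&core_state->startup) */
	pthread_cond_t finish;		/* wakeup in coredump_finish() */
};

static struct core_state cs = {
	.nr_threads = NR_EXITING,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.startup = PTHREAD_COND_INITIALIZER,
	.finish = PTHREAD_COND_INITIALIZER,
};

/* What coredump_task_exit() does once it sees a live core_state. */
static void *exiting_thread(void *arg)
{
	struct core_thread self = { .waiting = 1 };
	(void)arg;

	/* xchg(&core_state->dumper.next, &self): lock-free list push */
	self.next = atomic_exchange(&cs.dumper_next, &self);

	pthread_mutex_lock(&cs.lock);
	/* Last thread in lets the dumper start (complete(&startup)). */
	if (atomic_fetch_sub(&cs.nr_threads, 1) == 1)
		pthread_cond_signal(&cs.startup);
	/* Sleep until the dumper clears our flag (see coredump_finish()). */
	while (atomic_load(&self.waiting))
		pthread_cond_wait(&cs.finish, &cs.lock);
	pthread_mutex_unlock(&cs.lock);
	return NULL;
}

/* The core-inducing thread: wait for everyone, dump, release them. */
static void *dumper_thread(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&cs.lock);
	while (atomic_load(&cs.nr_threads) > 0)
		pthread_cond_wait(&cs.startup, &cs.lock);
	pthread_mutex_unlock(&cs.lock);

	puts("dumper: all threads quiescent, writing core...");

	/* coredump_finish(): walk the waiter list and wake each thread. */
	pthread_mutex_lock(&cs.lock);
	for (struct core_thread *ct = atomic_load(&cs.dumper_next); ct; ct = ct->next)
		atomic_store(&ct->waiting, 0);
	pthread_cond_broadcast(&cs.finish);
	pthread_mutex_unlock(&cs.lock);
	return NULL;
}

int main(void)
{
	pthread_t d, t[NR_EXITING];

	pthread_create(&d, NULL, dumper_thread, NULL);
	for (int i = 0; i < NR_EXITING; i++)
		pthread_create(&t[i], NULL, exiting_thread, NULL);
	for (int i = 0; i < NR_EXITING; i++)
		pthread_join(t[i], NULL);
	pthread_join(d, NULL);
	return 0;
}

As in the kernel code, each thread publishes itself with an atomic exchange before decrementing nr_threads (the "Implies mb()" comment in the patch), so by the time the dumper is woken the entire waiter list is guaranteed to be visible to it.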