diff options
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 49
1 file changed, 32 insertions(+), 17 deletions(-)
| diff --git a/kernel/fork.c b/kernel/fork.c index 08969f5aa38d..7a08025d2c99 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -75,7 +75,6 @@  #include <linux/freezer.h>  #include <linux/delayacct.h>  #include <linux/taskstats_kern.h> -#include <linux/random.h>  #include <linux/tty.h>  #include <linux/fs_struct.h>  #include <linux/magic.h> @@ -97,6 +96,7 @@  #include <linux/scs.h>  #include <linux/io_uring.h>  #include <linux/bpf.h> +#include <linux/stackprotector.h>  #include <asm/pgalloc.h>  #include <linux/uaccess.h> @@ -535,6 +535,9 @@ void put_task_stack(struct task_struct *tsk)  void free_task(struct task_struct *tsk)  { +#ifdef CONFIG_SECCOMP +	WARN_ON_ONCE(tsk->seccomp.filter); +#endif  	release_user_cpus_ptr(tsk);  	scs_release(tsk); @@ -753,8 +756,13 @@ static void check_mm(struct mm_struct *mm)  			 "Please make sure 'struct resident_page_types[]' is updated as well");  	for (i = 0; i < NR_MM_COUNTERS; i++) { -		long x = atomic_long_read(&mm->rss_stat.count[i]); +		long x = percpu_counter_sum(&mm->rss_stat[i]); + +		if (likely(!x)) +			continue; +		/* Making sure this is not due to race with CPU offlining. 
*/ +		x = percpu_counter_sum_all(&mm->rss_stat[i]);  		if (unlikely(x))  			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",  				 mm, resident_page_types[i], x); @@ -779,6 +787,8 @@ static void check_mm(struct mm_struct *mm)   */  void __mmdrop(struct mm_struct *mm)  { +	int i; +  	BUG_ON(mm == &init_mm);  	WARN_ON_ONCE(mm == current->mm);  	WARN_ON_ONCE(mm == current->active_mm); @@ -788,6 +798,9 @@ void __mmdrop(struct mm_struct *mm)  	check_mm(mm);  	put_user_ns(mm->user_ns);  	mm_pasid_drop(mm); + +	for (i = 0; i < NR_MM_COUNTERS; i++) +		percpu_counter_destroy(&mm->rss_stat[i]);  	free_mm(mm);  }  EXPORT_SYMBOL_GPL(__mmdrop); @@ -1107,6 +1120,8 @@ static void mm_init_uprobes_state(struct mm_struct *mm)  static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,  	struct user_namespace *user_ns)  { +	int i; +  	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);  	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);  	atomic_set(&mm->mm_users, 1); @@ -1148,10 +1163,17 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,  	if (init_new_context(p, mm))  		goto fail_nocontext; +	for (i = 0; i < NR_MM_COUNTERS; i++) +		if (percpu_counter_init(&mm->rss_stat[i], 0, GFP_KERNEL_ACCOUNT)) +			goto fail_pcpu; +  	mm->user_ns = get_user_ns(user_ns);  	lru_gen_init_mm(mm);  	return mm; +fail_pcpu: +	while (i > 0) +		percpu_counter_destroy(&mm->rss_stat[--i]);  fail_nocontext:  	mm_free_pgd(mm);  fail_nopgd: @@ -2043,15 +2065,6 @@ static __latent_entropy struct task_struct *copy_process(  			return ERR_PTR(-EINVAL);  	} -	/* -	 * If the new process will be in a different time namespace -	 * do not allow it to share VM or a thread group with the forking task. 
-	 */ -	if (clone_flags & (CLONE_THREAD | CLONE_VM)) { -		if (nsp->time_ns != nsp->time_ns_for_children) -			return ERR_PTR(-EINVAL); -	} -  	if (clone_flags & CLONE_PIDFD) {  		/*  		 * - CLONE_DETACHED is blocked so that we can potentially @@ -2406,12 +2419,6 @@ static __latent_entropy struct task_struct *copy_process(  	spin_lock(¤t->sighand->siglock); -	/* -	 * Copy seccomp details explicitly here, in case they were changed -	 * before holding sighand lock. -	 */ -	copy_seccomp(p); -  	rv_task_fork(p);  	rseq_fork(p, clone_flags); @@ -2428,6 +2435,14 @@ static __latent_entropy struct task_struct *copy_process(  		goto bad_fork_cancel_cgroup;  	} +	/* No more failure paths after this point. */ + +	/* +	 * Copy seccomp details explicitly here, in case they were changed +	 * before holding sighand lock. +	 */ +	copy_seccomp(p); +  	init_task_pid_links(p);  	if (likely(p->pid)) {  		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |