Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--	mm/oom_kill.c	26
1 file changed, 14 insertions, 12 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index eda2e2a0bdc6..71e3acea7817 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -73,7 +73,7 @@ static inline bool is_memcg_oom(struct oom_control *oc)
 /**
  * oom_cpuset_eligible() - check task eligiblity for kill
  * @start: task struct of which task to consider
- * @mask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
  *
  * Task eligibility is determined by whether or not a candidate task, @tsk,
  * shares the same mempolicy nodes as current if it is bound by such a policy
@@ -287,7 +287,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
 	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
 		oc->totalpages = total_swap_pages;
 		for_each_node_mask(nid, *oc->nodemask)
-			oc->totalpages += node_spanned_pages(nid);
+			oc->totalpages += node_present_pages(nid);
 		return CONSTRAINT_MEMORY_POLICY;
 	}
 
@@ -300,7 +300,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
 	if (cpuset_limited) {
 		oc->totalpages = total_swap_pages;
 		for_each_node_mask(nid, cpuset_current_mems_allowed)
-			oc->totalpages += node_spanned_pages(nid);
+			oc->totalpages += node_present_pages(nid);
 		return CONSTRAINT_CPUSET;
 	}
 	return CONSTRAINT_NONE;
@@ -523,7 +523,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 	set_bit(MMF_UNSTABLE, &mm->flags);
 
 	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
-		if (!can_madv_dontneed_vma(vma))
+		if (!can_madv_lru_vma(vma))
 			continue;
 
 		/*
@@ -884,12 +884,13 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
 	 */
 	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
 	mark_oom_victim(victim);
-	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
-		message, task_pid_nr(victim), victim->comm,
-		K(victim->mm->total_vm),
-		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
-		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
-		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
+	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
+		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
+		K(get_mm_counter(mm, MM_ANONPAGES)),
+		K(get_mm_counter(mm, MM_FILEPAGES)),
+		K(get_mm_counter(mm, MM_SHMEMPAGES)),
+		from_kuid(&init_user_ns, task_uid(victim)),
+		mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
 	task_unlock(victim);
 
 	/*
@@ -1068,9 +1069,10 @@ bool out_of_memory(struct oom_control *oc)
 	 * The OOM killer does not compensate for IO-less reclaim.
 	 * pagefault_out_of_memory lost its gfp context so we have to
 	 * make sure exclude 0 mask - all other users should have at least
-	 * ___GFP_DIRECT_RECLAIM to get here.
+	 * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
+	 * invoke the OOM killer even if it is a GFP_NOFS allocation.
 	 */
-	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
+	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
 		return true;
 
 	/*