Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  35
1 file changed, 27 insertions, 8 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 0bccc622e482..e8bfdf0d9d1d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2257,7 +2257,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	bool ret;
 	void *kaddr;
 	void __user *uaddr;
-	bool force_mkyoung;
+	bool locked = false;
 	struct vm_area_struct *vma = vmf->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = vmf->address;
@@ -2282,11 +2282,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	 * On architectures with software "accessed" bits, we would
 	 * take a double page fault, so mark it accessed here.
 	 */
-	force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
-	if (force_mkyoung) {
+	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
 		pte_t entry;
 
 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+		locked = true;
 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 			/*
 			 * Other thread has already handled the fault
@@ -2310,18 +2310,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	 * zeroes.
 	 */
 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+		if (locked)
+			goto warn;
+
+		/* Re-validate under PTL if the page is still mapped */
+		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+		locked = true;
+		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+			/* The PTE changed under us. Retry page fault. */
+			ret = false;
+			goto pte_unlock;
+		}
+
 		/*
-		 * Give a warn in case there can be some obscure
-		 * use-case
+		 * The same page can be mapped back since last copy attempt.
+		 * Try to copy again under PTL.
 		 */
-		WARN_ON_ONCE(1);
-		clear_page(kaddr);
+		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+			/*
+			 * Give a warn in case there can be some obscure
+			 * use-case
+			 */
+warn:
+			WARN_ON_ONCE(1);
+			clear_page(kaddr);
+		}
 	}
 
 	ret = true;
 
 pte_unlock:
-	if (force_mkyoung)
+	if (locked)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	kunmap_atomic(kaddr);
 	flush_dcache_page(dst);
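
For reference, this is how the user-copy path of cow_user_page() reads once the hunks above are applied. It is a condensed excerpt assembled from the new lines and their context in this diff, not the full function: the earlier declarations, the kmap of kaddr/uaddr, and the accessed-bit handling are omitted, and the shorter inline comments are paraphrased from the ones in the patch.

	/*
	 * Sketch: copy the user page locklessly, retrying once under the
	 * page-table lock if the first copy attempt faults.
	 */
	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
		if (locked)
			goto warn;	/* PTL already held and PTE validated; give up */

		/* Re-validate under PTL if the page is still mapped */
		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
		locked = true;
		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
			/* The PTE changed under us. Retry page fault. */
			ret = false;
			goto pte_unlock;
		}

		/* The same page can be mapped back since the last copy attempt. */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
warn:
			WARN_ON_ONCE(1);	/* warn for any obscure use-case */
			clear_page(kaddr);	/* fall back to a zeroed destination */
		}
	}

	ret = true;

pte_unlock:
	if (locked)
		pte_unmap_unlock(vmf->pte, vmf->ptl);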