Diffstat (limited to 'mm/huge_memory.c')

 -rw-r--r--   mm/huge_memory.c | 42
 1 file changed, 16 insertions, 26 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86fe697e8bfb..2f2f5e774902 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd)
+		pmd_t *pmd, int flags)
 {
 	pmd_t _pmd;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pmd is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-	 * set the young bit, instead of the current set_pmd_at.
-	 */
-	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+	_pmd = pmd_mkyoung(*pmd);
+	if (flags & FOLL_WRITE)
+		_pmd = pmd_mkdirty(_pmd);
 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-				pmd, _pmd,  1))
+				pmd, _pmd, flags & FOLL_WRITE))
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -875,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 	 */
 	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
 
-	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+	if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
 		return NULL;
 
 	if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -995,20 +990,15 @@ out:
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud)
+		pud_t *pud, int flags)
 {
 	pud_t _pud;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pud is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pud to
-	 * set the young bit, instead of the current set_pud_at.
-	 */
-	_pud = pud_mkyoung(pud_mkdirty(*pud));
+	_pud = pud_mkyoung(*pud);
+	if (flags & FOLL_WRITE)
+		_pud = pud_mkdirty(_pud);
 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-				pud, _pud,  1))
+				pud, _pud, flags & FOLL_WRITE))
 		update_mmu_cache_pud(vma, addr, pud);
 }
 
@@ -1022,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 
 	assert_spin_locked(pud_lockptr(mm, pud));
 
-	if (flags & FOLL_WRITE && !pud_write(*pud))
+	if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
 		return NULL;
 
 	if (pud_present(*pud) && pud_devmap(*pud))
@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pud(vma, addr, pud);
+		touch_pud(vma, addr, pud, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -1396,7 +1386,7 @@ out_unlock:
  */
 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
 {
-	return pmd_write(pmd) ||
+	return pmd_access_permitted(pmd, WRITE) ||
 	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
 }
 
@@ -1424,7 +1414,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * We don't mlock() pte-mapped THPs. This way we can avoid
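For reference, this is roughly how touch_pmd() reads once the patch is applied, pieced together from the hunks above; touch_pud() follows the same pattern with the pud helpers, and the comments here are editorial rather than part of the patch:

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	/* Always mark the entry young; mark it dirty only for write access. */
	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	/* The last argument now reports whether this was a write access,
	 * instead of the unconditional 1 used before the patch. */
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}

The gup callers (follow_devmap_pmd(), follow_devmap_pud(), follow_trans_huge_pmd()) now pass their FOLL_* flags through to these helpers, and the open-coded pmd_write()/pud_write() checks are replaced with pmd_access_permitted()/pud_access_permitted().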