Diffstat (limited to 'mm/hugetlb.c')

-rw-r--r--	mm/hugetlb.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 07abcb6eb203..245038a9fe4e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5478,7 +5478,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		       struct folio *pagecache_folio, spinlock_t *ptl)
 {
 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
-	pte_t pte;
+	pte_t pte = huge_ptep_get(ptep);
 	struct hstate *h = hstate_vma(vma);
 	struct page *old_page;
 	struct folio *new_folio;
@@ -5488,6 +5488,17 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct mmu_notifier_range range;
 
 	/*
+	 * Never handle CoW for uffd-wp protected pages.  It should be only
+	 * handled when the uffd-wp protection is removed.
+	 *
+	 * Note that only the CoW optimization path (in hugetlb_no_page())
+	 * can trigger this, because hugetlb_fault() will always resolve
+	 * uffd-wp bit first.
+	 */
+	if (!unshare && huge_pte_uffd_wp(pte))
+		return 0;
+
+	/*
 	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
 	 * PTE mapped R/O such as maybe_mkwrite() would do.
 	 */
@@ -5500,7 +5511,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		return 0;
 	}
 
-	pte = huge_ptep_get(ptep);
 	old_page = pte_page(pte);
 
 	delayacct_wpcopy_start();
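
The sketch below is not kernel code; it is a minimal userspace model of the check this patch adds to hugetlb_wp(): a write fault on a uffd-wp protected hugetlb PTE must not take the CoW path and instead returns 0, leaving the fault to be resolved once userfaultfd removes the write-protect bit. The types, flag values, and helper bodies are simplified stand-ins, not the kernel's definitions.

/* Userspace model of the early uffd-wp check added to hugetlb_wp().
 * All names and values here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAULT_FLAG_UNSHARE	0x1	/* stand-in for the kernel fault flag */
#define PTE_UFFD_WP		0x2	/* stand-in for the uffd-wp software bit */

typedef struct { unsigned long val; } pte_t;

static bool huge_pte_uffd_wp(pte_t pte)		/* modelled helper */
{
	return pte.val & PTE_UFFD_WP;
}

/* Modelled fault handler: a plain write fault on a uffd-wp protected PTE
 * returns 0 without copying; everything else would go on to CoW/unshare.
 */
static int hugetlb_wp_model(unsigned int flags, pte_t pte)
{
	const bool unshare = flags & FAULT_FLAG_UNSHARE;

	if (!unshare && huge_pte_uffd_wp(pte))
		return 0;	/* CoW deferred until uffd-wp is removed */

	/* ... the real function performs the CoW / unshare work here ... */
	return 1;
}

int main(void)
{
	pte_t wp_pte = { .val = PTE_UFFD_WP };
	pte_t plain  = { .val = 0 };

	printf("write fault, uffd-wp pte: %d (expect 0, no CoW)\n",
	       hugetlb_wp_model(0, wp_pte));
	printf("write fault, plain pte  : %d (expect 1, CoW path)\n",
	       hugetlb_wp_model(0, plain));
	return 0;
}

As the patch comment notes, only the CoW optimization path reached from hugetlb_no_page() can hit this case, since hugetlb_fault() resolves the uffd-wp bit before getting here; the model only illustrates the ordering of the new check relative to the copy path.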