diff options
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 25
1 file changed, 15 insertions, 10 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 418bf01a50ed..3edb759c5c7d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3286,6 +3286,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,  	BUG_ON(start & ~huge_page_mask(h));  	BUG_ON(end & ~huge_page_mask(h)); +	/* +	 * This is a hugetlb vma, all the pte entries should point +	 * to huge page. +	 */ +	tlb_remove_check_page_size_change(tlb, sz);  	tlb_start_vma(tlb, vma);  	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);  	address = start; @@ -3336,7 +3341,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,  		}  		pte = huge_ptep_get_and_clear(mm, address, ptep); -		tlb_remove_tlb_entry(tlb, ptep, address); +		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);  		if (huge_pte_dirty(pte))  			set_page_dirty(page); @@ -3450,15 +3455,17 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,   * Keep the pte_same checks anyway to make transition from the mutex easier.   
*/  static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, -			unsigned long address, pte_t *ptep, pte_t pte, -			struct page *pagecache_page, spinlock_t *ptl) +		       unsigned long address, pte_t *ptep, +		       struct page *pagecache_page, spinlock_t *ptl)  { +	pte_t pte;  	struct hstate *h = hstate_vma(vma);  	struct page *old_page, *new_page;  	int ret = 0, outside_reserve = 0;  	unsigned long mmun_start;	/* For mmu_notifiers */  	unsigned long mmun_end;		/* For mmu_notifiers */ +	pte = huge_ptep_get(ptep);  	old_page = pte_page(pte);  retry_avoidcopy: @@ -3711,8 +3718,7 @@ retry:  		vma_end_reservation(h, vma, address);  	} -	ptl = huge_pte_lockptr(h, mm, ptep); -	spin_lock(ptl); +	ptl = huge_pte_lock(h, mm, ptep);  	size = i_size_read(mapping->host) >> huge_page_shift(h);  	if (idx >= size)  		goto backout; @@ -3733,7 +3739,7 @@ retry:  	hugetlb_count_add(pages_per_huge_page(h), mm);  	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {  		/* Optimization, do the COW without a second fault */ -		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); +		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);  	}  	spin_unlock(ptl); @@ -3888,8 +3894,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,  	if (flags & FAULT_FLAG_WRITE) {  		if (!huge_pte_write(entry)) { -			ret = hugetlb_cow(mm, vma, address, ptep, entry, -					pagecache_page, ptl); +			ret = hugetlb_cow(mm, vma, address, ptep, +					  pagecache_page, ptl);  			goto out_put_page;  		}  		entry = huge_pte_mkdirty(entry); @@ -4330,8 +4336,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)  	if (!spte)  		goto out; -	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); -	spin_lock(ptl); +	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);  	if (pud_none(*pud)) {  		pud_populate(mm, pud,  				(pmd_t *)((unsigned long)spte & PAGE_MASK));  |