Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	102
1 file changed, 68 insertions, 34 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 0ea5d9071b32..709bc83703b1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -27,7 +27,7 @@
  *         mapping->i_mmap_rwsem
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
- *               zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
  *               swap_lock (in swap_duplicate, swap_info_get)
  *                 mmlist_lock (in mmput, drain_mmlist and others)
  *                 mapping->private_lock (in __set_page_dirty_buffers)
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
  * page_move_anon_rmap - move a page to our anon_vma
  * @page:	the page to move to our anon_vma
  * @vma:	the vma the page belongs to
- * @address:	the user virtual address mapped
  *
  * When a page belongs exclusively to one process after a COW event,
  * that page can be moved into the anon_vma that belongs to just that
  * process, so the rmap code will not search the parent or sibling
  * processes.
  */
-void page_move_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
+	page = compound_head(page);
+
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_VMA(!anon_vma, vma);
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
-		address &= HPAGE_PMD_MASK;
-	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	/*
@@ -1215,11 +1212,9 @@ void do_page_add_anon_rmap(struct page *page,
 		 * pte lock(a spinlock) is held, which implies preemption
 		 * disabled.
 		 */
-		if (compound) {
-			__inc_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		}
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+		if (compound)
+			__inc_node_page_state(page, NR_ANON_THPS);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
 	}
 	if (unlikely(PageKsm(page)))
 		return;
@@ -1256,14 +1251,14 @@ void page_add_new_anon_rmap(struct page *page,
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		/* increment count (starts at -1) */
 		atomic_set(compound_mapcount_ptr(page), 0);
-		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+		__inc_node_page_state(page, NR_ANON_THPS);
 	} else {
 		/* Anon THP always mapped first with PMD */
 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
 		/* increment count (starts at -1) */
 		atomic_set(&page->_mapcount, 0);
 	}
-	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
 	__page_set_anon_rmap(page, vma, address, 1);
 }
 
@@ -1273,18 +1268,42 @@ void page_add_new_anon_rmap(struct page *page,
  *
  * The caller needs to hold the pte lock.
  */
-void page_add_file_rmap(struct page *page)
+void page_add_file_rmap(struct page *page, bool compound)
 {
+	int i, nr = 1;
+
+	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
-	if (atomic_inc_and_test(&page->_mapcount)) {
-		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	if (compound && PageTransHuge(page)) {
+		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+			if (atomic_inc_and_test(&page[i]._mapcount))
+				nr++;
+		}
+		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
+			goto out;
+		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
+	} else {
+		if (PageTransCompound(page)) {
+			VM_BUG_ON_PAGE(!PageLocked(page), page);
+			SetPageDoubleMap(compound_head(page));
+			if (PageMlocked(page))
+				clear_page_mlock(compound_head(page));
+		}
+		if (!atomic_inc_and_test(&page->_mapcount))
+			goto out;
 	}
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
+	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+out:
 	unlock_page_memcg(page);
 }
 
-static void page_remove_file_rmap(struct page *page)
+static void page_remove_file_rmap(struct page *page, bool compound)
 {
+	int i, nr = 1;
+
+	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
 
 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
@@ -1295,15 +1314,26 @@ static void page_remove_file_rmap(struct page *page)
 	}
 
 	/* page still mapped by someone else? */
-	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+	if (compound && PageTransHuge(page)) {
+		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+			if (atomic_add_negative(-1, &page[i]._mapcount))
+				nr++;
+		}
+		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
+			goto out;
+		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
+	} else {
+		if (!atomic_add_negative(-1, &page->_mapcount))
+			goto out;
+	}
 
 	/*
-	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
 	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
@@ -1326,7 +1356,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 		return;
 
-	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__dec_node_page_state(page, NR_ANON_THPS);
 
 	if (TestClearPageDoubleMap(page)) {
 		/*
@@ -1345,7 +1375,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
 		clear_page_mlock(page);
 
 	if (nr) {
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
 		deferred_split_huge_page(page);
 	}
 }
@@ -1359,11 +1389,8 @@ static void page_remove_anon_compound_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-	if (!PageAnon(page)) {
-		VM_BUG_ON_PAGE(compound && !PageHuge(page), page);
-		page_remove_file_rmap(page);
-		return;
-	}
+	if (!PageAnon(page))
+		return page_remove_file_rmap(page, compound);
 
 	if (compound)
 		return page_remove_anon_compound_rmap(page);
@@ -1377,7 +1404,7 @@ void page_remove_rmap(struct page *page, bool compound)
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__dec_zone_page_state(page, NR_ANON_PAGES);
+	__dec_node_page_state(page, NR_ANON_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
@@ -1427,7 +1454,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			goto out;
 	}
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
+	pte = page_check_address(page, mm, address, &ptl,
+				 PageTransCompound(page));
 	if (!pte)
 		goto out;
 
@@ -1438,8 +1466,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	if (!(flags & TTU_IGNORE_MLOCK)) {
 		if (vma->vm_flags & VM_LOCKED) {
-			/* Holding pte lock, we do *not* need mmap_sem here */
-			mlock_vma_page(page);
+			/* PTE-mapped THP are never mlocked */
+			if (!PageTransCompound(page)) {
+				/*
+				 * Holding pte lock, we do *not* need
+				 * mmap_sem here
+				 */
+				mlock_vma_page(page);
+			}
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
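Note (not part of the diff above): the hunks change two rmap calling conventions. page_add_file_rmap() and page_remove_file_rmap() now take a bool compound argument so PMD-mapped file/shmem THPs can be accounted in the per-node NR_SHMEM_PMDMAPPED counter, and page_move_anon_rmap() no longer takes the faulting address. The sketch below only illustrates how a caller would be updated under those assumptions; both functions here are hypothetical and do not exist in the kernel tree, and the real call sites live in the fault and unmap paths.

	/*
	 * Illustrative sketch only -- hypothetical callers showing the
	 * calling conventions introduced by this diff.
	 */
	#include <linux/mm.h>
	#include <linux/rmap.h>

	/* Map a file page; the caller now states whether it is PMD-mapped. */
	static void sketch_map_file_page(struct page *page, bool pmd_mapped)
	{
		/* Old API: page_add_file_rmap(page); */
		page_add_file_rmap(page, pmd_mapped);
		/*
		 * With pmd_mapped == true the whole THP is accounted once in
		 * NR_SHMEM_PMDMAPPED; otherwise the page is counted in the
		 * per-node NR_FILE_MAPPED counter.
		 */
	}

	/* COW reuse: the address argument (and its VM_BUG_ON) is gone. */
	static void sketch_reuse_anon_page(struct page *page,
					   struct vm_area_struct *vma)
	{
		/* Old API: page_move_anon_rmap(page, vma, address); */
		page_move_anon_rmap(page, vma);
	}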