Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--   fs/hugetlbfs/inode.c | 46
1 file changed, 18 insertions(+), 28 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 28d2753be094..ed113ea17aff 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -334,7 +334,7 @@ static void remove_huge_page(struct page *page)
 }
 
 static void
-hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
+hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
 {
 	struct vm_area_struct *vma;
 
@@ -401,9 +401,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	const pgoff_t end = lend >> huge_page_shift(h);
 	struct vm_area_struct pseudo_vma;
 	struct pagevec pvec;
-	pgoff_t next;
+	pgoff_t next, index;
 	int i, freed = 0;
-	long lookup_nr = PAGEVEC_SIZE;
 	bool truncate_op = (lend == LLONG_MAX);
 
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
@@ -412,33 +411,19 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	next = start;
 	while (next < end) {
 		/*
-		 * Don't grab more pages than the number left in the range.
-		 */
-		if (end - next < lookup_nr)
-			lookup_nr = end - next;
-
-		/*
 		 * When no more pages are found, we are done.
 		 */
-		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
 			break;
 
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
 			u32 hash;
 
-			/*
-			 * The page (index) could be beyond end.  This is
-			 * only possible in the punch hole case as end is
-			 * max page offset in the truncate case.
-			 */
-			next = page->index;
-			if (next >= end)
-				break;
-
+			index = page->index;
 			hash = hugetlb_fault_mutex_hash(h, current->mm,
 							&pseudo_vma,
-							mapping, next, 0);
+							mapping, index, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			/*
@@ -455,8 +440,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
 				i_mmap_lock_write(mapping);
 				hugetlb_vmdelete_list(&mapping->i_mmap,
-					next * pages_per_huge_page(h),
-					(next + 1) * pages_per_huge_page(h));
+					index * pages_per_huge_page(h),
+					(index + 1) * pages_per_huge_page(h));
 				i_mmap_unlock_write(mapping);
 			}
 
@@ -475,14 +460,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			freed++;
 			if (!truncate_op) {
 				if (unlikely(hugetlb_unreserve_pages(inode,
-							next, next + 1, 1)))
+							index, index + 1, 1)))
 					hugetlb_fix_reserve_counts(inode);
 			}
 
 			unlock_page(page);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
-		++next;
 		huge_pagevec_release(&pvec);
 		cond_resched();
 	}
@@ -514,7 +498,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 
 	i_size_write(inode, offset);
 	i_mmap_lock_write(mapping);
-	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
+	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
 		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
 	i_mmap_unlock_write(mapping);
 	remove_inode_hugepages(inode, offset, LLONG_MAX);
@@ -539,7 +523,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 		inode_lock(inode);
 		i_mmap_lock_write(mapping);
-		if (!RB_EMPTY_ROOT(&mapping->i_mmap))
+		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
 			hugetlb_vmdelete_list(&mapping->i_mmap,
 						hole_start >> PAGE_SHIFT,
 						hole_end  >> PAGE_SHIFT);
@@ -846,7 +830,10 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
 	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
-	migrate_page_copy(newpage, page);
+	if (mode != MIGRATE_SYNC_NO_COPY)
+		migrate_page_copy(newpage, page);
+	else
+		migrate_page_states(newpage, page);
 
 	return MIGRATEPAGE_SUCCESS;
 }
@@ -855,9 +842,12 @@ static int hugetlbfs_error_remove_page(struct address_space *mapping,
 				struct page *page)
 {
 	struct inode *inode = mapping->host;
+	pgoff_t index = page->index;
 
 	remove_huge_page(page);
-	hugetlb_fix_reserve_counts(inode);
+	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
+		hugetlb_fix_reserve_counts(inode);
+
 	return 0;
 }
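For readability, below is a simplified sketch (not part of the patch) of how the remove_inode_hugepages() loop iterates after this change. pagevec_lookup_range() both advances the cursor and bounds the lookup at end - 1, which is why the lookup_nr clamping, the explicit "index beyond end" check, and the trailing ++next are no longer needed. The fault-mutex locking, page locking, and the unmap of racing mappings that the real function performs are elided here.

	/*
	 * Simplified sketch only: locking and the page_mapped() race
	 * handling are elided.  pagevec_lookup_range() advances 'next'
	 * itself and never returns pages past 'end - 1'.
	 */
	next = start;
	while (next < end) {
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			pgoff_t index = page->index;

			/* drop the page and, on hole punch, its reservation */
			remove_huge_page(page);
			if (!truncate_op &&
			    unlikely(hugetlb_unreserve_pages(inode, index,
							     index + 1, 1)))
				hugetlb_fix_reserve_counts(inode);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}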