Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  37
1 file changed, 23 insertions, 14 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index a16b15090df3..ac6f4939bb59 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,7 +100,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	/*
 	 * Check PageMovable before holding a PG_lock because page's owner
 	 * assumes anybody doesn't touch PG_lock of newly allocated page
-	 * so unconditionally grapping the lock ruins page's owner side.
+	 * so unconditionally grabbing the lock ruins page's owner side.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putpage;
@@ -374,7 +374,7 @@ unlock:
 }
 #endif
 
-static int expected_page_refs(struct page *page)
+static int expected_page_refs(struct address_space *mapping, struct page *page)
 {
 	int expected_count = 1;
 
@@ -384,7 +384,7 @@ static int expected_page_refs(struct page *page)
 	 */
 	expected_count += is_device_private_page(page);
 	expected_count += is_device_public_page(page);
-	if (page_mapping(page))
+	if (mapping)
 		expected_count += hpage_nr_pages(page) + page_has_private(page);
 
 	return expected_count;
@@ -405,7 +405,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = expected_page_refs(page) + extra_count;
+	int expected_count = expected_page_refs(mapping, page) + extra_count;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 	/* Simple case, sync compaction */
 	if (mode != MIGRATE_ASYNC) {
 		do {
-			get_bh(bh);
 			lock_buffer(bh);
 			bh = bh->b_this_page;
 
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 
 	/* async case, we cannot block on lock_buffer so use trylock_buffer */
 	do {
-		get_bh(bh);
 		if (!trylock_buffer(bh)) {
 			/*
 			 * We failed to lock the buffer and cannot stall in
 			 * async migration. Release the taken locks
 			 */
 			struct buffer_head *failed_bh = bh;
-			put_bh(failed_bh);
 			bh = head;
 			while (bh != failed_bh) {
 				unlock_buffer(bh);
-				put_bh(bh);
 				bh = bh->b_this_page;
 			}
 			return false;
@@ -754,7 +750,7 @@ static int __buffer_migrate_page(struct address_space *mapping,
 		return migrate_page(mapping, newpage, page, mode);
 
 	/* Check whether page does not have extra refs before we do more work */
-	expected_count = expected_page_refs(page);
+	expected_count = expected_page_refs(mapping, page);
 	if (page_count(page) != expected_count)
 		return -EAGAIN;
 
@@ -818,7 +814,6 @@ unlock_buffers:
 	bh = head;
 	do {
 		unlock_buffer(bh);
-		put_bh(bh);
 		bh = bh->b_this_page;
 	} while (bh != head);
 
@@ -916,7 +911,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	 */
 	if (page_has_private(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
-		return -EAGAIN;
+		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 
 	return migrate_page(mapping, newpage, page, mode);
 }
@@ -1135,10 +1130,13 @@ out:
 	 * If migration is successful, decrease refcount of the newpage
 	 * which will not free the page because new page owner increased
 	 * refcounter. As well, if it is LRU page, add the page to LRU
-	 * list in here.
+	 * list in here. Use the old state of the isolated source page to
+	 * determine if we migrated a LRU page. newpage was already unlocked
+	 * and possibly modified by its owner - don't rely on the page
+	 * state.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		if (unlikely(__PageMovable(newpage)))
+		if (unlikely(!is_lru))
 			put_page(newpage);
 		else
 			putback_lru_page(newpage);
@@ -1289,7 +1287,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	struct anon_vma *anon_vma = NULL;
 
 	/*
-	 * Movability of hugepages depends on architectures and hugepage size.
+	 * Migratability of hugepages depends on architectures and their size.
 	 * This check is necessary because some callers of hugepage migration
 	 * like soft offline and memory hotremove don't walk through page
 	 * tables or check whether the hugepage is pmd-based or not before
@@ -1317,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		lock_page(hpage);
 	}
 
+	/*
+	 * Check for pages which are in the process of being freed.  Without
+	 * page_mapping() set, hugetlbfs specific move page routine will not
+	 * be called and we could leak usage counts for subpools.
+	 */
+	if (page_private(hpage) && !page_mapping(hpage)) {
+		rc = -EBUSY;
+		goto out_unlock;
+	}
+
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
@@ -1347,6 +1355,7 @@ put_anon:
 		put_new_page = NULL;
 	}
 
+out_unlock:
 	unlock_page(hpage);
 out:
 	if (rc != -EAGAIN)
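
Two of the hunks above are worth unpacking. The buffer_migrate_lock_buffers() hunks drop the per-buffer get_bh()/put_bh() calls around the lock walk, leaving a plain try-lock pass over the circular buffer list with rollback on failure. Below is a minimal userspace sketch of that rollback pattern only, using pthread mutexes in place of buffer locks; the struct and function names are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the async-case rollback in buffer_migrate_lock_buffers():
 * try-lock every node of a circular list; if one trylock fails, walk
 * from the head again and release exactly the locks taken so far.
 */
#include <pthread.h>
#include <stdbool.h>

struct node {
	pthread_mutex_t lock;
	struct node *next;	/* circular, like bh->b_this_page */
};

static bool trylock_all(struct node *head)
{
	struct node *n = head;

	do {
		if (pthread_mutex_trylock(&n->lock) != 0) {
			/* Roll back: unlock [head, n) and report failure. */
			struct node *failed = n;

			for (n = head; n != failed; n = n->next)
				pthread_mutex_unlock(&n->lock);
			return false;
		}
		n = n->next;
	} while (n != head);

	return true;	/* every node is now locked */
}

static void unlock_all(struct node *head)
{
	struct node *n = head;

	do {
		pthread_mutex_unlock(&n->lock);
		n = n->next;
	} while (n != head);
}

Because the rollback restarts from the head and stops at the node whose trylock failed, it releases precisely the locks already taken, which is what lets the async path bail out without blocking.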
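The expected_page_refs() hunks, meanwhile, thread the caller's already-known mapping through instead of recomputing page_mapping(); the underlying check totals every reference the migration path can account for and returns -EAGAIN when the page holds extra, unaccounted references. A hedged sketch of that idea follows, with a toy refcounted object standing in for struct page; all names here are invented for illustration.

/*
 * Sketch of the "expected reference count" pattern: sum the references
 * we can account for and treat any surplus as "someone else is using
 * this, retry later".
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

struct object {
	atomic_int refcount;
	bool has_mapping;	/* stands in for the mapping argument */
	int private_refs;	/* stands in for page_has_private() etc. */
};

static int expected_refs(const struct object *obj)
{
	int expected = 1;	/* the caller's own reference */

	if (obj->has_mapping)
		expected += 1 + obj->private_refs;
	return expected;
}

static int try_migrate(struct object *obj)
{
	/* Someone holds an unaccounted reference: try again later. */
	if (atomic_load(&obj->refcount) != expected_refs(obj))
		return -EAGAIN;

	/* ... safe to proceed with the move ... */
	return 0;
}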