diff options
Diffstat (limited to 'mm/vmalloc.c')
| -rw-r--r-- | mm/vmalloc.c | 15 | 
1 file changed, 9 insertions, 6 deletions
| diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d77830ff604c..e8a807c78110 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2816,6 +2816,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,  		unsigned int order, unsigned int nr_pages, struct page **pages)  {  	unsigned int nr_allocated = 0; +	struct page *page; +	int i;  	/*  	 * For order-0 pages we make use of bulk allocator, if @@ -2823,7 +2825,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,  	 * to fails, fallback to a single page allocator that is  	 * more permissive.  	 */ -	if (!order) { +	if (!order && nid != NUMA_NO_NODE) {  		while (nr_allocated < nr_pages) {  			unsigned int nr, nr_pages_request; @@ -2848,7 +2850,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,  			if (nr != nr_pages_request)  				break;  		} -	} else +	} else if (order)  		/*  		 * Compound pages required for remap_vmalloc_page if  		 * high-order pages. @@ -2856,11 +2858,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,  		gfp |= __GFP_COMP;  	/* High-order pages or fallback path if "bulk" fails. */ -	while (nr_allocated < nr_pages) { -		struct page *page; -		int i; -		page = alloc_pages_node(nid, gfp, order); +	while (nr_allocated < nr_pages) { +		if (nid == NUMA_NO_NODE) +			page = alloc_pages(gfp, order); +		else +			page = alloc_pages_node(nid, gfp, order);  		if (unlikely(!page))  			break; |