diff options
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 52 | 
1 file changed, 21 insertions(+), 31 deletions(-)
| diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 28f80daf5c04..c565de8f48e9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -287,7 +287,7 @@ EXPORT_SYMBOL(nr_online_nodes);  static bool page_contains_unaccepted(struct page *page, unsigned int order);  static void accept_page(struct page *page, unsigned int order); -static bool try_to_accept_memory(struct zone *zone, unsigned int order); +static bool cond_accept_memory(struct zone *zone, unsigned int order);  static inline bool has_unaccepted_memory(void);  static bool __free_unaccepted(struct page *page); @@ -3072,9 +3072,6 @@ static inline long __zone_watermark_unusable_free(struct zone *z,  	if (!(alloc_flags & ALLOC_CMA))  		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);  #endif -#ifdef CONFIG_UNACCEPTED_MEMORY -	unusable_free += zone_page_state(z, NR_UNACCEPTED); -#endif  	return unusable_free;  } @@ -3368,6 +3365,8 @@ retry:  			}  		} +		cond_accept_memory(zone, order); +  		/*  		 * Detect whether the number of free pages is below high  		 * watermark.  
If so, we will decrease pcp->high and free @@ -3393,10 +3392,8 @@ check_alloc_wmark:  				       gfp_mask)) {  			int ret; -			if (has_unaccepted_memory()) { -				if (try_to_accept_memory(zone, order)) -					goto try_this_zone; -			} +			if (cond_accept_memory(zone, order)) +				goto try_this_zone;  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT  			/* @@ -3450,10 +3447,8 @@ try_this_zone:  			return page;  		} else { -			if (has_unaccepted_memory()) { -				if (try_to_accept_memory(zone, order)) -					goto try_this_zone; -			} +			if (cond_accept_memory(zone, order)) +				goto try_this_zone;  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT  			/* Try again if zone has deferred pages */ @@ -5755,7 +5750,6 @@ void __init setup_per_cpu_pageset(void)  	for_each_online_pgdat(pgdat)  		pgdat->per_cpu_nodestats =  			alloc_percpu(struct per_cpu_nodestat); -	store_early_perpage_metadata();  }  __meminit void zone_pcp_init(struct zone *zone) @@ -5821,14 +5815,7 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char  void free_reserved_page(struct page *page)  { -	if (mem_alloc_profiling_enabled()) { -		union codetag_ref *ref = get_page_tag_ref(page); - -		if (ref) { -			set_codetag_empty(ref); -			put_page_tag_ref(ref); -		} -	} +	clear_page_tag_ref(page);  	ClearPageReserved(page);  	init_page_count(page);  	__free_page(page); @@ -6951,9 +6938,6 @@ static bool try_to_accept_memory_one(struct zone *zone)  	struct page *page;  	bool last; -	if (list_empty(&zone->unaccepted_pages)) -		return false; -  	spin_lock_irqsave(&zone->lock, flags);  	page = list_first_entry_or_null(&zone->unaccepted_pages,  					struct page, lru); @@ -6979,23 +6963,29 @@ static bool try_to_accept_memory_one(struct zone *zone)  	return true;  } -static bool try_to_accept_memory(struct zone *zone, unsigned int order) +static bool cond_accept_memory(struct zone *zone, unsigned int order)  {  	long to_accept; -	int ret = false; +	bool ret = false; + +	if (!has_unaccepted_memory()) +		return 
false; + +	if (list_empty(&zone->unaccepted_pages)) +		return false;  	/* How much to accept to get to high watermark? */  	to_accept = high_wmark_pages(zone) -  		    (zone_page_state(zone, NR_FREE_PAGES) - -		    __zone_watermark_unusable_free(zone, order, 0)); +		    __zone_watermark_unusable_free(zone, order, 0) - +		    zone_page_state(zone, NR_UNACCEPTED)); -	/* Accept at least one page */ -	do { +	while (to_accept > 0) {  		if (!try_to_accept_memory_one(zone))  			break;  		ret = true;  		to_accept -= MAX_ORDER_NR_PAGES; -	} while (to_accept > 0); +	}  	return ret;  } @@ -7038,7 +7028,7 @@ static void accept_page(struct page *page, unsigned int order)  {  } -static bool try_to_accept_memory(struct zone *zone, unsigned int order) +static bool cond_accept_memory(struct zone *zone, unsigned int order)  {  	return false;  } |