Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 53 +++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9c9194959271..15c2050c629b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -670,6 +670,7 @@ out:
 
 void free_compound_page(struct page *page)
 {
+	mem_cgroup_uncharge(page);
 	__free_pages_ok(page, compound_order(page));
 }
 
@@ -3511,7 +3512,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
-				RECLAIM_DISTANCE;
+				node_reclaim_distance;
 }
 #else	/* CONFIG_NUMA */
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -3955,14 +3956,22 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
 		goto check_priority;
 
 	/*
+	 * compaction was skipped because there are not enough order-0 pages
+	 * to work with, so we retry only if it looks like reclaim can help.
+	 */
+	if (compaction_needs_reclaim(compact_result)) {
+		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
+		goto out;
+	}
+
+	/*
 	 * make sure the compaction wasn't deferred or didn't bail out early
 	 * due to locks contention before we declare that we should give up.
-	 * But do not retry if the given zonelist is not suitable for
-	 * compaction.
+	 * But the next retry should use a higher priority if allowed, so
+	 * we don't just keep bailing out endlessly.
 	 */
 	if (compaction_withdrawn(compact_result)) {
-		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
-		goto out;
+		goto check_priority;
 	}
 
 	/*
@@ -4458,6 +4467,28 @@ retry_cpuset:
 		if (page)
 			goto got_pg;
 
+		if (order >= pageblock_order && (gfp_mask & __GFP_IO)) {
+			/*
+			 * If allocating entire pageblock(s) and compaction
+			 * failed because all zones are below low watermarks
+			 * or is prohibited because it recently failed at this
+			 * order, fail immediately.
+			 *
+			 * Reclaim is
+			 *  - potentially very expensive because zones are far
+			 *    below their low watermarks or this is part of very
+			 *    bursty high order allocations,
+			 *  - not guaranteed to help because isolate_freepages()
+			 *    may not iterate over freed pages as part of its
+			 *    linear scan, and
+			 *  - unlikely to make entire pageblocks free on its
+			 *    own.
+			 */
+			if (compact_result == COMPACT_SKIPPED ||
+			    compact_result == COMPACT_DEFERRED)
+				goto nopage;
+		}
+
 		/*
 		 * Checks for costly allocations with __GFP_NORETRY, which
 		 * includes THP page fault allocations
@@ -5971,7 +6002,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 		}
 	}
 
-	pr_info("%s initialised, %lu pages in %ums\n", dev_name(pgmap->dev),
+	pr_info("%s initialised %lu pages in %ums\n", __func__,
 		size, jiffies_to_msecs(jiffies - start));
 }
 
@@ -6638,9 +6669,11 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
-	spin_lock_init(&pgdat->split_queue_lock);
-	INIT_LIST_HEAD(&pgdat->split_queue);
-	pgdat->split_queue_len = 0;
+	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
+
+	spin_lock_init(&ds_queue->split_queue_lock);
+	INIT_LIST_HEAD(&ds_queue->split_queue);
+	ds_queue->split_queue_len = 0;
 }
 #else
 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
@@ -8196,7 +8229,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			if (!hugepage_migration_supported(page_hstate(head)))
 				goto unmovable;
 
-			skip_pages = (1 << compound_order(head)) - (page - head);
+			skip_pages = compound_nr(head) - (page - head);
 			iter += skip_pages - 1;
 			continue;
 		}
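Two of the hunks above reward a closer look. In should_compact_retry(), results indicating a shortage of order-0 pages are now split out through compaction_needs_reclaim(): only those still consult compaction_zonelist_suitable(), while other withdrawn results escalate to a higher compaction priority instead of bailing out. Below is a minimal userspace sketch of the resulting decision order; the enum values and helper bodies are simplified stand-ins, not the kernel's actual definitions.

/* Sketch of the retry decision order after this diff; the enum and
 * helpers are simplified stand-ins, not the kernel's definitions. */
#include <stdbool.h>
#include <stdio.h>

enum compact_result { COMPACT_SKIPPED, COMPACT_DEFERRED, COMPACT_CONTENDED,
		      COMPACT_COMPLETE };

/* Stand-in: true when compaction lacked order-0 pages to work with. */
static bool compaction_needs_reclaim(enum compact_result r)
{
	return r == COMPACT_SKIPPED;
}

/* Stand-in: deferred or lock-contended results count as "withdrawn". */
static bool compaction_withdrawn(enum compact_result r)
{
	return r == COMPACT_DEFERRED || r == COMPACT_CONTENDED;
}

static const char *retry_decision(enum compact_result r)
{
	if (compaction_needs_reclaim(r))
		return "retry only if reclaim can free order-0 pages";
	if (compaction_withdrawn(r))	/* was: check zonelist suitability */
		return "raise compaction priority and retry";
	return "fall through to the priority/backoff checks";
}

int main(void)
{
	printf("skipped:  %s\n", retry_decision(COMPACT_SKIPPED));
	printf("deferred: %s\n", retry_decision(COMPACT_DEFERRED));
	return 0;
}

The net effect is that a skipped compaction no longer shares a path with a deferred or contended one, so each failure mode gets the retry strategy that can actually help it.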
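Similarly, the has_unmovable_pages() hunk replaces the open-coded 1 << compound_order(head) with the compound_nr() helper; for a compound head the two compute the same page count, so the skip arithmetic is unchanged. A hedged illustration of that computation, using plain pfn-style integers as stand-ins for struct page pointers (the values and the compound_nr_sketch() helper are hypothetical):

#include <stdio.h>

/* Stand-in for compound_nr(): pages in a compound page of the given order. */
static unsigned long compound_nr_sketch(unsigned int order)
{
	return 1UL << order;
}

int main(void)
{
	unsigned long head = 1024;	/* pfn of the compound head */
	unsigned long page = 1027;	/* pfn the scan is currently at */
	unsigned int order = 9;		/* e.g. a 2MB hugepage: 512 pages */

	/* Pages left to skip from 'page' to the end of the compound page. */
	unsigned long skip_pages = compound_nr_sketch(order) - (page - head);

	printf("skip %lu of %lu pages\n", skip_pages, compound_nr_sketch(order));
	return 0;
}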