Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 98
1 file changed, 32 insertions(+), 66 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a714c4f800e9..a5ad0b35ab8e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -374,7 +374,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
  */
 int prealloc_shrinker(struct shrinker *shrinker)
 {
-	size_t size = sizeof(*shrinker->nr_deferred);
+	unsigned int size = sizeof(*shrinker->nr_deferred);
 
 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
 		size *= nr_node_ids;
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		delta = freeable / 2;
 	}
 
-	/*
-	 * Make sure we apply some minimal pressure on default priority
-	 * even on small cgroups. Stale objects are not only consuming memory
-	 * by themselves, but can also hold a reference to a dying cgroup,
-	 * preventing it from being reclaimed. A dying cgroup with all
-	 * corresponding structures like per-cpu stats and kmem caches
-	 * can be really big, so it may lead to a significant waste of memory.
-	 */
-	delta = max_t(unsigned long long, delta, min(freeable, batch_size));
-
 	total_scan += delta;
 	if (total_scan < 0) {
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -962,7 +952,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		 */
 		if (reclaimed && page_is_file_cache(page) &&
 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
-			shadow = workingset_eviction(mapping, page);
+			shadow = workingset_eviction(page);
 		__delete_from_page_cache(page, shadow);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 
@@ -1116,16 +1106,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
-	int pgactivate = 0;
-	unsigned nr_unqueued_dirty = 0;
-	unsigned nr_dirty = 0;
-	unsigned nr_congested = 0;
 	unsigned nr_reclaimed = 0;
-	unsigned nr_writeback = 0;
-	unsigned nr_immediate = 0;
-	unsigned nr_ref_keep = 0;
-	unsigned nr_unmap_fail = 0;
 
+	memset(stat, 0, sizeof(*stat));
 	cond_resched();
 
 	while (!list_empty(page_list)) {
@@ -1169,10 +1152,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		page_check_dirty_writeback(page, &dirty, &writeback);
 		if (dirty || writeback)
-			nr_dirty++;
+			stat->nr_dirty++;
 
 		if (dirty && !writeback)
-			nr_unqueued_dirty++;
+			stat->nr_unqueued_dirty++;
 
 		/*
 		 * Treat this page as congested if the underlying BDI is or if
@@ -1184,7 +1167,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (((dirty || writeback) && mapping &&
 		     inode_write_congested(mapping->host)) ||
 		    (writeback && PageReclaim(page)))
-			nr_congested++;
+			stat->nr_congested++;
 
 		/*
 		 * If a page at the tail of the LRU is under writeback, there
@@ -1233,7 +1216,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
 			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
-				nr_immediate++;
+				stat->nr_immediate++;
 				goto activate_locked;
 
 			/* Case 2 above */
@@ -1251,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * and it's also appropriate in global reclaim.
 				 */
 				SetPageReclaim(page);
-				nr_writeback++;
+				stat->nr_writeback++;
 				goto activate_locked;
 
 			/* Case 3 above */
@@ -1271,7 +1254,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
 		case PAGEREF_KEEP:
-			nr_ref_keep++;
+			stat->nr_ref_keep++;
 			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
@@ -1336,7 +1319,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 			if (!try_to_unmap(page, flags)) {
-				nr_unmap_fail++;
+				stat->nr_unmap_fail++;
 				goto activate_locked;
 			}
 		}
@@ -1484,7 +1467,7 @@ activate_locked:
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		if (!PageMlocked(page)) {
 			SetPageActive(page);
-			pgactivate++;
+			stat->nr_activate++;
 			count_memcg_page_event(page, PGACTIVATE);
 		}
 keep_locked:
@@ -1499,18 +1482,8 @@ keep:
 	free_unref_page_list(&free_pages);
 
 	list_splice(&ret_pages, page_list);
-	count_vm_events(PGACTIVATE, pgactivate);
-
-	if (stat) {
-		stat->nr_dirty = nr_dirty;
-		stat->nr_congested = nr_congested;
-		stat->nr_unqueued_dirty = nr_unqueued_dirty;
-		stat->nr_writeback = nr_writeback;
-		stat->nr_immediate = nr_immediate;
-		stat->nr_activate = pgactivate;
-		stat->nr_ref_keep = nr_ref_keep;
-		stat->nr_unmap_fail = nr_unmap_fail;
-	}
+	count_vm_events(PGACTIVATE, stat->nr_activate);
+
 	return nr_reclaimed;
 }
 
@@ -1522,6 +1495,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
+	struct reclaim_stat dummy_stat;
 	unsigned long ret;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
@@ -1535,7 +1509,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_IGNORE_ACCESS, NULL, true);
+			TTU_IGNORE_ACCESS, &dummy_stat, true);
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1640,8 +1614,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 
 }
 
-/*
- * zone_lru_lock is heavily contended.  Some of the functions that
+/**
+ * pgdat->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
  * and working on them outside the LRU lock.
  *
@@ -1663,7 +1637,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		struct lruvec *lruvec, struct list_head *dst,
 		unsigned long *nr_scanned, struct scan_control *sc,
-		isolate_mode_t mode, enum lru_list lru)
+		enum lru_list lru)
 {
 	struct list_head *src = &lruvec->lists[lru];
 	unsigned long nr_taken = 0;
@@ -1672,6 +1646,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
 	LIST_HEAD(pages_skipped);
+	isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
 
 	scan = 0;
 	for (total_scan = 0;
@@ -1775,11 +1750,11 @@ int isolate_lru_page(struct page *page)
 	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
 
 	if (PageLRU(page)) {
-		struct zone *zone = page_zone(page);
+		pg_data_t *pgdat = page_pgdat(page);
 		struct lruvec *lruvec;
 
-		spin_lock_irq(zone_lru_lock(zone));
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		spin_lock_irq(&pgdat->lru_lock);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
 			get_page(page);
@@ -1787,7 +1762,7 @@ int isolate_lru_page(struct page *page)
 			del_page_from_lru_list(page, lruvec, lru);
 			ret = 0;
 		}
-		spin_unlock_irq(zone_lru_lock(zone));
+		spin_unlock_irq(&pgdat->lru_lock);
 	}
 	return ret;
 }
@@ -1909,8 +1884,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	struct reclaim_stat stat = {};
-	isolate_mode_t isolate_mode = 0;
+	struct reclaim_stat stat;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
@@ -1931,13 +1905,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	lru_add_drain();
 
-	if (!sc->may_unmap)
-		isolate_mode |= ISOLATE_UNMAPPED;
-
 	spin_lock_irq(&pgdat->lru_lock);
 
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
-				     &nr_scanned, sc, isolate_mode, lru);
+				     &nr_scanned, sc, lru);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 	reclaim_stat->recent_scanned[file] += nr_taken;
@@ -2019,9 +1990,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  * processes, from rmap.
  *
  * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone_lru_lock across the whole operation.  But if
+ * appropriate to hold pgdat->lru_lock across the whole operation.  But if
  * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone_lru_lock around each page.  It's impossible to balance
+ * should drop pgdat->lru_lock around each page.  It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
  * nobody will play with that bit on a non-LRU page.
@@ -2094,19 +2065,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	unsigned nr_deactivate, nr_activate;
 	unsigned nr_rotated = 0;
-	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
 	lru_add_drain();
 
-	if (!sc->may_unmap)
-		isolate_mode |= ISOLATE_UNMAPPED;
-
 	spin_lock_irq(&pgdat->lru_lock);
 
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
-				     &nr_scanned, sc, isolate_mode, lru);
+				     &nr_scanned, sc, lru);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 	reclaim_stat->recent_scanned[file] += nr_taken;
@@ -2764,16 +2731,15 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 				   sc->nr_reclaimed - reclaimed);
 
 			/*
-			 * Direct reclaim and kswapd have to scan all memory
-			 * cgroups to fulfill the overall scan target for the
-			 * node.
+			 * Kswapd have to scan all memory cgroups to fulfill
+			 * the overall scan target for the node.
 			 *
 			 * Limit reclaim, on the other hand, only cares about
 			 * nr_to_reclaim pages to be reclaimed and it will
 			 * retry with decreasing priority if one round over the
 			 * whole hierarchy is not sufficient.
 			 */
-			if (!global_reclaim(sc) &&
+			if (!current_is_kswapd() &&
 					sc->nr_reclaimed >= sc->nr_to_reclaim) {
 				mem_cgroup_iter_break(root, memcg);
 				break;
@@ -3537,7 +3503,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
  *
  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
- * found to have free_pages <= high_wmark_pages(zone), any page is that zone
+ * found to have free_pages <= high_wmark_pages(zone), any page in that zone
  * or lower is eligible for reclaim until at least one usable zone is
  * balanced.
  */
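For orientation, here is the calling convention this diff leaves behind, as a minimal stand-alone sketch rather than kernel code (the trimmed struct reclaim_stat and fake_shrink_page_list() below are illustrative stand-ins): shrink_page_list() now expects a non-NULL reclaim_stat, zeroes it itself, and bumps the counters in place, so callers that do not care, such as reclaim_clean_pages_from_list(), pass a stack-allocated dummy_stat instead of NULL.

/*
 * Stand-alone sketch of the reclaim_stat contract after this diff; the
 * struct is trimmed and fake_shrink_page_list() is an illustrative stand-in
 * for shrink_page_list(), not the kernel function.
 */
#include <stdio.h>
#include <string.h>

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_activate;
	/* remaining counters elided */
};

/* stat may no longer be NULL: it is zeroed here and updated in place */
static unsigned long fake_shrink_page_list(struct reclaim_stat *stat)
{
	memset(stat, 0, sizeof(*stat));
	stat->nr_activate++;	/* stands in for the real per-page accounting */
	return 1;
}

int main(void)
{
	/* mirrors the dummy_stat that reclaim_clean_pages_from_list() now passes */
	struct reclaim_stat dummy_stat;
	unsigned long reclaimed = fake_shrink_page_list(&dummy_stat);

	printf("reclaimed=%lu activated=%u\n", reclaimed, dummy_stat.nr_activate);
	return 0;
}

The same contract is what lets the diff drop the "if (stat)" copy-out block at the end of shrink_page_list() and read stat->nr_activate directly in count_vm_events(PGACTIVATE, ...).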