author    Linus Torvalds <[email protected]>  2018-08-17 16:49:31 -0700
committer Linus Torvalds <[email protected]>  2018-08-17 16:49:31 -0700
commit    6ada4e2826794bdf8d88f938a9ced0b80894b037
tree      4a39e46d6c4502ae9346566b2e384dcc2205c014
parent    9bd553929f68921be0f2014dd06561e0c8249a0d
parent    1e9264192961aa519595170aa8b0f7651a2ad28e
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few misc things
 - a few Y2038 fixes
 - ntfs fixes
 - arch/sh tweaks
 - ocfs2 updates
 - most of MM

* emailed patches from Andrew Morton <[email protected]>: (111 commits)
  mm/hmm.c: remove unused variables align_start and align_end
  fs/userfaultfd.c: remove redundant pointer uwq
  mm, vmacache: hash addresses based on pmd
  mm/list_lru: introduce list_lru_shrink_walk_irq()
  mm/list_lru.c: pass struct list_lru_node* as an argument to __list_lru_walk_one()
  mm/list_lru.c: move locking from __list_lru_walk_one() to its caller
  mm/list_lru.c: use list_lru_walk_one() in list_lru_walk_node()
  mm, swap: make CONFIG_THP_SWAP depend on CONFIG_SWAP
  mm/sparse: delete old sparse_init and enable new one
  mm/sparse: add new sparse_init_nid() and sparse_init()
  mm/sparse: move buffer init/fini to the common place
  mm/sparse: use the new sparse buffer functions in non-vmemmap
  mm/sparse: abstract sparse buffer allocations
  mm/hugetlb.c: don't zero 1GiB bootmem pages
  mm, page_alloc: double zone's batchsize
  mm/oom_kill.c: document oom_lock
  mm/hugetlb: remove gigantic page support for HIGHMEM
  mm, oom: remove sleep from under oom_lock
  kernel/dma: remove unsupported gfp_mask parameter from dma_alloc_from_contiguous()
  mm/cma: remove unsupported gfp_mask parameter from cma_alloc()
  ...
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 33 ++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0922ef5d2e46..15ea511fb41c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4165,11 +4165,12 @@ retry:
 		alloc_flags = reserve_flags;
 
 	/*
-	 * Reset the zonelist iterators if memory policies can be ignored.
-	 * These allocations are high priority and system rather than user
-	 * orientated.
+	 * Reset the nodemask and zonelist iterators if memory policies can be
+	 * ignored. These allocations are high priority and system rather than
+	 * user oriented.
 	 */
 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
+		ac->nodemask = NULL;
 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->high_zoneidx, ac->nodemask);
 	}
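The hunk above does more than reword the comment: setting ac->nodemask to NULL before re-running first_zones_zonelist() makes these high-priority retries ignore the caller's memory-policy nodemask, so zones on every node become candidates again. The effect can be modeled with a minimal userspace sketch; struct zone, first_eligible_zone(), and the nodemask array below are hypothetical stand-ins for the kernel's machinery, not its real types:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for the kernel's zone/zonelist machinery. */
struct zone { int node; const char *name; };

/* A NULL nodemask means "any node allowed", mirroring the cleared
 * ac->nodemask in the hunk above. */
static const struct zone *first_eligible_zone(const struct zone *zones,
					      int nr, const bool *nodemask)
{
	for (int i = 0; i < nr; i++)
		if (!nodemask || nodemask[zones[i].node])
			return &zones[i];
	return NULL;
}

int main(void)
{
	const struct zone zl[] = { { 0, "node0/Normal" }, { 1, "node1/Normal" } };
	bool only_node1[] = { false, true };	/* policy restricts to node 1 */

	/* With the policy mask in force, node 0 is skipped: node1/Normal. */
	printf("%s\n", first_eligible_zone(zl, 2, only_node1)->name);
	/* After the reset (nodemask = NULL), the full list is used: node0/Normal. */
	printf("%s\n", first_eligible_zone(zl, 2, NULL)->name);
	return 0;
}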
@@ -4403,19 +4404,15 @@ out:
 EXPORT_SYMBOL(__alloc_pages_nodemask);
 
 /*
- * Common helper functions.
+ * Common helper functions. Never use with __GFP_HIGHMEM because the returned
+ * address cannot represent highmem pages. Use alloc_pages and then kmap if
+ * you need to access high mem.
  */
 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page *page;
 
-	/*
-	 * __get_free_pages() returns a virtual address, which cannot represent
-	 * a highmem page
-	 */
-	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
-	page = alloc_pages(gfp_mask, order);
+	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
 	if (!page)
 		return 0;
 	return (unsigned long) page_address(page);
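With the VM_BUG_ON() gone, __get_free_pages() now silently masks __GFP_HIGHMEM off instead of crashing, since page_address() cannot produce a usable pointer for a highmem page that may have no kernel mapping. For callers that genuinely need highmem, the updated comment points at the alloc_pages()-then-kmap() pattern; a kernel-style sketch of that pattern follows (copy_into_page() and its error handling are made up for illustration, not taken from the patch):

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Illustrative only: allocate a page that may live in highmem, map it
 * temporarily to write to it, then drop the mapping and the page. */
static int copy_into_page(const void *src, size_t len)
{
	struct page *page;
	void *vaddr;

	if (len > PAGE_SIZE)
		return -EINVAL;

	/* __GFP_HIGHMEM is fine here: we never rely on page_address(). */
	page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
	if (!page)
		return -ENOMEM;

	vaddr = kmap(page);		/* temporary kernel mapping */
	memcpy(vaddr, src, len);
	kunmap(page);

	__free_pages(page, 0);		/* freed again; demo only */
	return 0;
}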
@@ -5567,13 +5564,12 @@ static int zone_batchsize(struct zone *zone)
 
 	/*
 	 * The per-cpu-pages pools are set to around 1000th of the
-	 * size of the zone. But no more than 1/2 of a meg.
-	 *
-	 * OK, so we don't know how big the cache is. So guess.
+	 * size of the zone.
 	 */
 	batch = zone->managed_pages / 1024;
-	if (batch * PAGE_SIZE > 512 * 1024)
-		batch = (512 * 1024) / PAGE_SIZE;
+	/* But no more than a meg. */
+	if (batch * PAGE_SIZE > 1024 * 1024)
+		batch = (1024 * 1024) / PAGE_SIZE;
 	batch /= 4;	/* We effectively *= 4 below */
 	if (batch < 1)
 		batch = 1;
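This is the "mm, page_alloc: double zone's batchsize" change from the merge list: the per-cpu batch stays at roughly 1/1024th of the zone, but the cap rises from 512 KiB to 1 MiB worth of pages. With 4 KiB pages, any zone of 1 GiB or more hits the cap and now yields 256/4 = 64 pages per batch instead of 32, before the final clamp the full function applies. A standalone sketch of just this arithmetic, with made-up zone sizes:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumes 4 KiB pages */

static unsigned long batchsize(unsigned long managed_pages)
{
	unsigned long batch = managed_pages / 1024;

	if (batch * PAGE_SIZE > 1024 * 1024)	/* new cap: 1 MiB (was 512 KiB) */
		batch = (1024 * 1024) / PAGE_SIZE;
	batch /= 4;				/* effectively *= 4 later */
	if (batch < 1)
		batch = 1;
	return batch;
}

int main(void)
{
	printf("%lu\n", batchsize(262144));	/* 1 GiB zone: prints 64 (was 32) */
	printf("%lu\n", batchsize(16384));	/* 64 MiB zone: prints 4, cap unused */
	return 0;
}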
@@ -6405,8 +6401,11 @@ void __paginginit zero_resv_unavail(void)
 	pgcnt = 0;
 	for_each_resv_unavail_range(i, &start, &end) {
 		for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
-			if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+			if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
+				pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
+					+ pageblock_nr_pages - 1;
 				continue;
+			}
 			mm_zero_struct_page(pfn_to_page(pfn));
 			pgcnt++;
 		}
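The added braces and pfn adjustment let the inner loop skip the remainder of a pageblock whose memmap is absent: pfn is moved to the block's last pfn, so the loop's pfn++ resumes at the first pfn of the next block instead of re-testing pfn_valid() for every page in the hole. A standalone sketch of the skip arithmetic, assuming a power-of-two block size (the 512-page value is illustrative; pageblock_nr_pages is arch- and config-dependent in the kernel):

#include <stdio.h>

/* Simplified ALIGN_DOWN, valid for power-of-two alignments. */
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define PAGEBLOCK_NR_PAGES	512UL	/* assumed for the example */

int main(void)
{
	unsigned long pfn = 1000;	/* inside the invalid block [512, 1023] */

	/* Jump to the block's last pfn; the loop's pfn++ then lands on 1024. */
	pfn = ALIGN_DOWN(pfn, PAGEBLOCK_NR_PAGES) + PAGEBLOCK_NR_PAGES - 1;
	printf("pfn = %lu, next iteration starts at %lu\n", pfn, pfn + 1);
	return 0;
}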