author    Ingo Molnar <mingo@kernel.org>  2019-02-11 12:52:51 +0100
committer Ingo Molnar <mingo@kernel.org>  2019-02-11 12:52:51 +0100
commit    dc14b5fe7d0a1e37e9388964614676e4166b2953
tree      4f4f3aed094446b3e5daedd8710ccd831943490f  /mm/page_alloc.c
parent    bae54dc4f353653bbd3e3081754adad05da1d4dd
parent    d13937116f1e82bf508a6325111b322c30c85eb9
Merge tag 'v5.0-rc6' into x86/fpu, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 12
1 file changed, 0 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d295c9bc01a8..35fdde041f5c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5701,18 +5701,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
cond_resched();
}
}
-#ifdef CONFIG_SPARSEMEM
- /*
- * If the zone does not span the rest of the section then
- * we should at least initialize those pages. Otherwise we
- * could blow up on a poisoned page in some paths which depend
- * on full sections being initialized (e.g. memory hotplug).
- */
- while (end_pfn % PAGES_PER_SECTION) {
- __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
- end_pfn++;
- }
-#endif
}
#ifdef CONFIG_ZONE_DEVICE