author    Baoquan He <[email protected]>        2018-08-17 15:48:42 -0700
committer Linus Torvalds <[email protected]>    2018-08-17 16:20:31 -0700
commit    07a34a8c36521c37119259d937d1389c3f5f6db9 (patch)
tree      c1378da8a9aef210acfa79f54d8c3258960f69bf
parent    f2fc10e0b3fe7d1aecbd2cab6bf0007b6771e16d (diff)
mm/sparsemem.c: defer the ms->section_mem_map clearing
In sparse_init(), if CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y, the system allocates one continuous memory chunk for the mem maps on one node and populates the relevant page tables to map the memory sections one by one.  If populating a certain mem section fails, a warning is printed and that section's ->section_mem_map is cleared to cancel its present marking.  As a result, the number of mem sections marked as present can shrink during sparse_init() execution.

Defer the ms->section_mem_map clearing for sections whose page tables failed to populate until the last for_each_present_section_nr() loop.  This is in preparation for later optimizing the mem map allocation.

[[email protected]: remove now-unused local `ms', per Oscar]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Baoquan He <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Reviewed-by: Pavel Tatashin <[email protected]>
Reviewed-by: Oscar Salvador <[email protected]>
Cc: Pasha Tatashin <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Pankaj Gupta <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
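The following standalone sketch illustrates the deferred-clearing pattern the patch moves to: the populate step only reports failures, and the present marking is dropped in a single final pass.  This is not kernel code; struct section, populate_map() and NR_SECTIONS are illustrative stand-ins for the real sparsemem structures.

#include <stdio.h>
#include <stdlib.h>

#define NR_SECTIONS 8

struct section {
	unsigned long section_mem_map;	/* non-zero: section marked present */
	void *usemap;
	void *map;
};

static struct section sections[NR_SECTIONS];

/* Pretend every other map allocation fails, to exercise the fallback path. */
static void *populate_map(int nr)
{
	return (nr % 2) ? NULL : malloc(64);
}

int main(void)
{
	int nr;

	/*
	 * Mark all sections present and try to populate them.  The populate
	 * step only warns on failure; it no longer touches section_mem_map.
	 */
	for (nr = 0; nr < NR_SECTIONS; nr++) {
		sections[nr].section_mem_map = 1;
		sections[nr].usemap = malloc(16);
		sections[nr].map = populate_map(nr);
		if (!sections[nr].map)
			fprintf(stderr, "section %d: map backing failed\n", nr);
	}

	/*
	 * Final pass, analogous to the last for_each_present_section_nr()
	 * loop in sparse_init(): clear the present marking only here.
	 */
	for (nr = 0; nr < NR_SECTIONS; nr++) {
		if (!sections[nr].usemap || !sections[nr].map) {
			sections[nr].section_mem_map = 0;
			continue;
		}
		printf("section %d initialized\n", nr);
	}
	return 0;
}

Keeping the clearing out of the populate helpers means the set of present sections stays stable while the allocations run, which is what the later mem map allocation optimization relies on.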
-rw-r--r--  mm/sparse-vmemmap.c   4
-rw-r--r--  mm/sparse.c          12
2 files changed, 8 insertions, 8 deletions
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index bd0276d5f66b..68bb65b2d34d 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -292,18 +292,14 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
}
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
- struct mem_section *ms;
-
if (!present_section_nr(pnum))
continue;
map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
if (map_map[pnum])
continue;
- ms = __nr_to_section(pnum);
pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
__func__);
- ms->section_mem_map = 0;
}
if (vmemmap_buf_start) {
diff --git a/mm/sparse.c b/mm/sparse.c
index 99a6383e98bc..eb31274aae8b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -446,7 +446,6 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
ms = __nr_to_section(pnum);
pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
__func__);
- ms->section_mem_map = 0;
}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -474,7 +473,6 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
__func__);
- ms->section_mem_map = 0;
return NULL;
}
#endif
@@ -578,17 +576,23 @@ void __init sparse_init(void)
#endif
for_each_present_section_nr(0, pnum) {
+ struct mem_section *ms;
+ ms = __nr_to_section(pnum);
usemap = usemap_map[pnum];
- if (!usemap)
+ if (!usemap) {
+ ms->section_mem_map = 0;
continue;
+ }
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
map = map_map[pnum];
#else
map = sparse_early_mem_map_alloc(pnum);
#endif
- if (!map)
+ if (!map) {
+ ms->section_mem_map = 0;
continue;
+ }
sparse_init_one_section(__nr_to_section(pnum), pnum, map,
usemap);