commit 05f800a0bd08e14606ac63e0a5c63ed6880acaab
tree   f9acb2e3b72340518a9168bb3db8d1761ce117a4
parent db051a0dac13db24d58470d75cee0ce7c6b031a1
author    David Hildenbrand <[email protected]>  2019-07-18 15:57:01 -0700
committer Linus Torvalds <[email protected]>     2019-07-18 17:08:06 -0700
mm/memory_hotplug: drop MHP_MEMBLOCK_API
No longer needed; the callers of arch_add_memory() can handle this manually.
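
For illustration only, a minimal sketch of what handling this "manually" can look like in a caller of arch_add_memory(), assuming the create_memory_block_devices() helper introduced by the parent commit; example_add_memory() is a hypothetical name and error handling is reduced to the bare minimum, so this is not the kernel's actual add_memory_resource():

#include <linux/memory.h>
#include <linux/memory_hotplug.h>

/*
 * Hypothetical caller (sketch only): with MHP_MEMBLOCK_API gone, nothing
 * below arch_add_memory() registers sysfs memory block devices anymore;
 * the caller creates them explicitly via create_memory_block_devices().
 */
static int example_add_memory(int nid, u64 start, u64 size)
{
	struct mhp_restrictions restrictions = {};	/* no flags needed */
	int ret;

	/* Create the sections/mappings for the range. */
	ret = arch_add_memory(nid, start, size, &restrictions);
	if (ret)
		return ret;

	/* Create the sysfs memory block devices "manually". */
	ret = create_memory_block_devices(start, size);
	if (ret)
		arch_remove_memory(nid, start, size, NULL);

	return ret;
}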
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: David Hildenbrand <[email protected]>
Reviewed-by: Wei Yang <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Pavel Tatashin <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Qian Cai <[email protected]>
Cc: Arun KS <[email protected]>
Cc: Mathieu Malaterre <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Alex Deucher <[email protected]>
Cc: Andrew Banman <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Baoquan He <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Chintan Pandya <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Chris Wilson <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Fenghua Yu <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jonathan Cameron <[email protected]>
Cc: Jun Yao <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Logan Gunthorpe <[email protected]>
Cc: Mark Brown <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Masahiro Yamada <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: "[email protected]" <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: "Rafael J. Wysocki" <[email protected]>
Cc: Rich Felker <[email protected]>
Cc: Rob Herring <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Yoshinori Sato <[email protected]>
Cc: Yu Zhao <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
 include/linux/memory_hotplug.h | 8 --------
 mm/memory_hotplug.c            | 9 +++------
 2 files changed, 3 insertions(+), 14 deletions(-)
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 87bf9c4a889e..36c514b80cf1 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -128,14 +128,6 @@ extern void arch_remove_memory(int nid, u64 start, u64 size,
 extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);
 
-/*
- * Do we want sysfs memblock files created. This will allow userspace to online
- * and offline memory explicitly. Lack of this bit means that the caller has to
- * call move_pfn_range_to_zone to finish the initialization.
- */
-
-#define MHP_MEMBLOCK_API		(1<<0)
-
 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 		struct mhp_restrictions *restrictions);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 78291526eb4d..fb9dc3fa1138 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -251,7 +251,7 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
 static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
-		struct vmem_altmap *altmap, bool want_memblock)
+		struct vmem_altmap *altmap)
 {
 	int ret;
 
@@ -294,8 +294,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
 	}
 
 	for (i = start_sec; i <= end_sec; i++) {
-		err = __add_section(nid, section_nr_to_pfn(i), altmap,
-				restrictions->flags & MHP_MEMBLOCK_API);
+		err = __add_section(nid, section_nr_to_pfn(i), altmap);
 
 		/*
 		 * EEXIST is finally dealt with by ioresource collision
@@ -1065,9 +1064,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
  */
 int __ref add_memory_resource(int nid, struct resource *res)
 {
-	struct mhp_restrictions restrictions = {
-		.flags = MHP_MEMBLOCK_API,
-	};
+	struct mhp_restrictions restrictions = {};
 	u64 start, size;
 	bool new_node = false;
 	int ret;