| | | |
|---|---|---|
| author | Thomas Gleixner <[email protected]> | 2021-03-14 16:34:35 +0100 |
| committer | Thomas Gleixner <[email protected]> | 2021-03-14 16:34:35 +0100 |
| commit | b470ebc9e0e57f53d1db9c49b8a3de4086babd05 (patch) | |
| tree | 95c61291ad5f216967a9be36f19774026ffc88cb /mm/memblock.c | |
| parent | 4c7bcb51ae25f79e3733982e5d0cd8ce8640ddfc (diff) | |
| parent | 5fbecd2389f48e1415799c63130d0cdce1cf3f60 (diff) | |
Merge tag 'irqchip-fixes-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent
Pull irqchip fixes from Marc Zyngier:
- More compatible strings for the Ingenic irqchip (introducing the
JZ4760B SoC)
- Select GENERIC_IRQ_MULTI_HANDLER on the ARM ep93xx platform
- Drop all GENERIC_IRQ_MULTI_HANDLER selections from the irqchip
Kconfig, now relying on the architecture to get it right
- Drop the debugfs_file field from struct irq_domain, now that
debugfs can track things on its own
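For the second and third items, GENERIC_IRQ_MULTI_HANDLER is the config symbol that lets a platform install its top-level interrupt dispatcher at runtime through set_handle_irq() instead of a hard-wired, per-architecture entry path; this pull moves responsibility for selecting it from individual irqchip Kconfig entries to the architectures themselves. The sketch below is illustrative only and is not the ep93xx or Ingenic code touched here: the example_vic_* names, the register offset, and the init hook are hypothetical, while set_handle_irq() and handle_domain_irq() are the real kernel interfaces of this era.

```c
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>

#define EXAMPLE_VIC_PENDING	0x00	/* hypothetical pending-status register */

static void __iomem *example_vic_base;
static struct irq_domain *example_vic_domain;

/* Top-level dispatcher invoked directly from the low-level IRQ exception entry. */
static void __exception_irq_entry example_vic_handle_irq(struct pt_regs *regs)
{
	u32 pending;

	while ((pending = readl_relaxed(example_vic_base + EXAMPLE_VIC_PENDING))) {
		unsigned int hwirq = __ffs(pending);

		/* Map the hardware IRQ through the domain and run its flow handler. */
		handle_domain_irq(example_vic_domain, hwirq, regs);
	}
}

static void __init example_vic_init(void __iomem *base, struct irq_domain *domain)
{
	example_vic_base = base;
	example_vic_domain = domain;

	/*
	 * Register the dispatcher. set_handle_irq() only exists when the
	 * architecture selects GENERIC_IRQ_MULTI_HANDLER, which is why the
	 * select now lives in the arch Kconfig rather than in drivers/irqchip.
	 */
	set_handle_irq(example_vic_handle_irq);
}
```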
Diffstat (limited to 'mm/memblock.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | mm/memblock.c | 55 |

1 file changed, 7 insertions, 48 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 1eaaec1e7687..afaefa8fc6ab 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -275,14 +275,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
  * Return:
  * Found address on success, 0 on failure.
  */
@@ -291,8 +283,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 					phys_addr_t end, int nid,
 					enum memblock_flags flags)
 {
-	phys_addr_t kernel_end, ret;
-
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
 	    end == MEMBLOCK_ALLOC_KASAN)
@@ -301,40 +291,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 	/* avoid allocating the first page */
 	start = max_t(phys_addr_t, start, PAGE_SIZE);
 	end = max(start, end);
-	kernel_end = __pa_symbol(_end);
-
-	/*
-	 * try bottom-up allocation only when bottom-up mode
-	 * is set and @end is above the kernel image.
-	 */
-	if (memblock_bottom_up() && end > kernel_end) {
-		phys_addr_t bottom_up_start;
-
-		/* make sure we will allocate above the kernel */
-		bottom_up_start = max(start, kernel_end);
-
-		/* ok, try bottom-up allocation first */
-		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
-						      size, align, nid, flags);
-		if (ret)
-			return ret;
-
-		/*
-		 * we always limit bottom-up allocation above the kernel,
-		 * but top-down allocation doesn't have the limit, so
-		 * retrying top-down allocation may succeed when bottom-up
-		 * allocation failed.
-		 *
-		 * bottom-up allocation is expected to be fail very rarely,
-		 * so we use WARN_ONCE() here to see the stack trace if
-		 * fail happens.
-		 */
-		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
-			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
-	}
-
-	return __memblock_find_range_top_down(start, end, size, align, nid,
-					      flags);
+	if (memblock_bottom_up())
+		return __memblock_find_range_bottom_up(start, end, size, align,
+						       nid, flags);
+	else
+		return __memblock_find_range_top_down(start, end, size, align,
+						      nid, flags);
 }
 
 /**
@@ -2087,10 +2050,8 @@ void __init reset_all_zones_managed_pages(void)
 
 /**
  * memblock_free_all - release free pages to the buddy allocator
- *
- * Return: the number of pages actually released.
  */
-unsigned long __init memblock_free_all(void)
+void __init memblock_free_all(void)
 {
 	unsigned long pages;
 
@@ -2099,8 +2060,6 @@ unsigned long __init memblock_free_all(void)
 
 	pages = free_low_memory_core_early();
 	totalram_pages_add(pages);
-
-	return pages;
 }
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
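The first three hunks remove the special case where bottom-up allocation was forced to start above the kernel image (with a top-down retry on failure); memblock_find_in_range_node() now simply honours the current allocation direction. The last two hunks make memblock_free_all() return void, since its only caller-visible effect is the totalram_pages_add() accounting it already performs internally. Below is a minimal sketch of how early boot code exercises this path: memblock_set_bottom_up() and memblock_phys_alloc() are the real memblock API, while early_arch_setup() is a hypothetical caller standing in for arch setup code.

```c
#include <linux/memblock.h>

static phys_addr_t __init early_arch_setup(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t pa;

	/*
	 * Search free ranges from low addresses upward. After the change
	 * above, memblock_find_in_range_node() honours this unconditionally
	 * instead of first clamping the search above _end and then retrying
	 * top-down.
	 */
	memblock_set_bottom_up(true);

	/* Reserve a block and return its physical address (0 on failure). */
	pa = memblock_phys_alloc(size, align);

	/* Restore the default top-down policy. */
	memblock_set_bottom_up(false);

	return pa;
}
```

Keeping the direction as a global policy bit toggled by memblock_set_bottom_up(), rather than a per-call flag, is what lets the find routine collapse to the plain if/else shown in the diff.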