path: root/arch/arm64/mm/init.c
Diffstat (limited to 'arch/arm64/mm/init.c')
-rw-r--r--	arch/arm64/mm/init.c | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3685e12aba9b..e55409caaee3 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,6 +35,7 @@
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
@@ -42,6 +43,7 @@
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
+#include <asm/xen/swiotlb-xen.h>
/*
* We need to be able to catch inadvertent references to memstart_addr
@@ -220,6 +222,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = PFN_PHYS(pfn);
+ struct mem_section *ms;
/*
* Ensure the upper PAGE_SHIFT bits are clear in the
@@ -230,10 +233,6 @@ int pfn_valid(unsigned long pfn)
if (PHYS_PFN(addr) != pfn)
return 0;
-#ifdef CONFIG_SPARSEMEM
-{
- struct mem_section *ms;
-
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
@@ -252,8 +251,7 @@ int pfn_valid(unsigned long pfn)
*/
if (!early_section(ms))
return pfn_section_valid(ms, pfn);
-}
-#endif
+
return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
@@ -452,6 +450,8 @@ void __init bootmem_init(void)
dma_pernuma_cma_reserve();
+ kvm_hyp_reserve();
+
/*
* sparse_init() tries to allocate memory from memblock, so must be
* done after the fixed reservations
@@ -483,7 +483,7 @@ void __init mem_init(void)
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
- else
+ else if (!xen_swiotlb_detect())
swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
@@ -491,8 +491,6 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
memblock_free_all();
- mem_init_print_info(NULL);
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
@@ -521,7 +519,7 @@ void free_initmem(void)
* prevents the region from being reused for kernel modules, which
* is not supported by kallsyms.
*/
- unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
+ vunmap_range((u64)__init_begin, (u64)__init_end);
}
void dump_mem_limit(void)
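
A few notes on the hunks above. The pfn_valid() change drops the CONFIG_SPARSEMEM guard and its extra brace scope, hoisting the struct mem_section declaration into the function body proper, presumably because every configuration this tree supports enables sparsemem anyway. Pieced together from the two hunks, the resulting function reads roughly as below; the lines the diff context elides (the __pfn_to_section()/valid_section() lookup and the comment bodies) are filled in as assumptions from the surrounding kernel source, so treat this as a sketch, not the verbatim upstream function.

int pfn_valid(unsigned long pfn)
{
        phys_addr_t addr = PFN_PHYS(pfn);
        struct mem_section *ms;

        /*
         * Ensure the upper PAGE_SHIFT bits are clear in the
         * pfn: a pfn with stray high bits would otherwise survive
         * the PFN_PHYS()/PHYS_PFN() round trip.
         */
        if (PHYS_PFN(addr) != pfn)
                return 0;

        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;

        /* Assumed from the context elided between the two hunks. */
        ms = __pfn_to_section(pfn);
        if (!valid_section(ms))
                return 0;

        /*
         * Hotplugged (non-early) sections are not tracked in
         * memblock, so defer to the generic section-level check.
         */
        if (!early_section(ms))
                return pfn_section_valid(ms, pfn);

        return memblock_is_map_memory(addr);
}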
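
The new kvm_hyp_reserve() call in bootmem_init() is what the added <asm/kvm_host.h> include is for, and it is deliberately placed before sparse_init(), which also allocates from memblock. Its rough shape, reconstructed from memory rather than from this diff, is:

/* Sketch: carve out the protected-KVM (pKVM) hypervisor's private
 * memory pool from memblock before the buddy allocator takes over.
 * Effectively a no-op outside kvm-arm.mode=protected boots. */
void __init kvm_hyp_reserve(void)
{
        if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
                return;

        if (!is_protected_kvm_enabled())
                return;

        /* ... size the hypervisor's stacks and page pool, then
         * memblock_reserve() the region ... */
}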
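
The mem_init() hunk replaces a bare else with else if (!xen_swiotlb_detect()). Previously, any machine whose memory sits entirely below arm64_dma_phys_limit set SWIOTLB_NO_FORCE and thereby gave up its bounce buffer, which broke Xen domains that must route DMA through swiotlb-xen regardless of the physical memory layout. The helper comes from the newly included <asm/xen/swiotlb-xen.h>; a hedged reconstruction of its logic:

/* Returns nonzero when this kernel runs in a Xen domain that needs
 * swiotlb-xen: direct-mapped (1:1) domains and, on hypervisors too
 * old to advertise the feature flags, the initial domain. */
static inline int xen_swiotlb_detect(void)
{
        if (!xen_domain())
                return 0;
        if (xen_feature(XENFEAT_direct_mapped))
                return 1;
        /* Legacy case: assume an initial domain is direct-mapped. */
        if (!xen_feature(XENFEAT_not_direct_mapped) &&
            xen_initial_domain())
                return 1;
        return 0;
}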
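
Finally, the free_initmem() hunk tracks the generic-mm replacement of unmap_kernel_range() rather than changing behaviour: the old helper took a base address and a size, while vunmap_range() takes start and end addresses, so the size arithmetic disappears:

/* Old calling convention: (address, size) */
unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));

/* New calling convention: (start, end); same region unmapped */
vunmap_range((u64)__init_begin, (u64)__init_end);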