Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--  arch/x86/mm/init.c  37
1 file changed, 17 insertions, 20 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a97ee0801475..08a7d313538a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
 static unsigned long __init get_new_step_size(unsigned long step_size)
 {
 	/*
-	 * Explain why we shift by 5 and why we don't have to worry about
-	 * 'step_size << 5' overflowing:
-	 *
-	 * initial mapped size is PMD_SIZE (2M).
+	 * Initial mapped size is PMD_SIZE (2M).
 	 * We can not set step_size to be PUD_SIZE (1G) yet.
 	 * In worse case, when we cross the 1G boundary, and
 	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-	 * to map 1G range with PTE. Use 5 as shift for now.
+	 * to map 1G range with PTE. Hence we use one less than the
+	 * difference of page table level shifts.
 	 *
-	 * Don't need to worry about overflow, on 32bit, when step_size
-	 * is 0, round_down() returns 0 for start, and that turns it
-	 * into 0x100000000ULL.
+	 * Don't need to worry about overflow in the top-down case, on 32bit,
+	 * when step_size is 0, round_down() returns 0 for start, and that
+	 * turns it into 0x100000000ULL.
+	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
+	 * needs to be taken into consideration by the code below.
 	 */
-	return step_size << 5;
+	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
 }
 
 /**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
 	unsigned long step_size;
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
-	unsigned long new_mapped_ram_size;
 
 	/* xen has big range in reserved near end of ram, skip it at first.*/
 	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
 				start = map_start;
 		} else
 			start = map_start;
-		new_mapped_ram_size = init_range_memory_mapping(start,
+		mapped_ram_size += init_range_memory_mapping(start,
 							last_start);
 		last_start = start;
 		min_pfn_mapped = last_start >> PAGE_SHIFT;
-		/* only increase step_size after big range get mapped */
-		if (new_mapped_ram_size > mapped_ram_size)
+		if (mapped_ram_size >= step_size)
 			step_size = get_new_step_size(step_size);
-		mapped_ram_size += new_mapped_ram_size;
 	}
 
 	if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
 static void __init memory_map_bottom_up(unsigned long map_start,
 					unsigned long map_end)
 {
-	unsigned long next, new_mapped_ram_size, start;
+	unsigned long next, start;
 	unsigned long mapped_ram_size = 0;
 	/* step_size need to be small so pgt_buf from BRK could cover it */
 	unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
 	 * for page table.
 	 */
 	while (start < map_end) {
-		if (map_end - start > step_size) {
+		if (step_size && map_end - start > step_size) {
 			next = round_up(start + 1, step_size);
 			if (next > map_end)
 				next = map_end;
-		} else
+		} else {
 			next = map_end;
+		}
 
-		new_mapped_ram_size = init_range_memory_mapping(start, next);
+		mapped_ram_size += init_range_memory_mapping(start, next);
 		start = next;
 
-		if (new_mapped_ram_size > mapped_ram_size)
+		if (mapped_ram_size >= step_size)
 			step_size = get_new_step_size(step_size);
-		mapped_ram_size += new_mapped_ram_size;
 	}
 }
 
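For illustration, a minimal standalone sketch (not kernel code) of how the patched get_new_step_size() behaves, assuming the common x86 values PAGE_SHIFT=12 and PMD_SHIFT=21, i.e. a shift by 8; uint32_t stands in for a 32-bit kernel's unsigned long:

/* Standalone illustration, not kernel code: simulates the patched
 * get_new_step_size() on a 32-bit unsigned long (modelled as uint32_t),
 * assuming the common x86 values PAGE_SHIFT=12 and PMD_SHIFT=21. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* 4K base pages */
#define PMD_SHIFT  21			/* 2M PMD mappings */
#define PMD_SIZE   (UINT32_C(1) << PMD_SHIFT)

static uint32_t get_new_step_size(uint32_t step_size)
{
	/* Same expression as the patched helper: a shift by 8 on x86. */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

int main(void)
{
	uint32_t step_size = PMD_SIZE;	/* initial mapped size, 2M */

	for (int i = 0; i < 4; i++) {
		printf("iteration %d: step_size = 0x%08x\n", i, (unsigned)step_size);
		step_size = get_new_step_size(step_size);
	}
	/* Prints 0x00200000 (2M), 0x20000000 (512M), then 0x00000000:
	 * the second increase overflows 32 bits and wraps to 0. */
	return 0;
}

step_size thus grows 2M -> 512M on the first increase and wraps to 0 on the second, which is the overflow the updated comment and the new step_size check in the bottom-up loop account for.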
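Similarly, a sketch of the round_up()/round_down() corner case the new comment mentions, reproducing the kernel's macro definitions (include/linux/kernel.h of this era) in a standalone program; with an alignment of 0 the round mask underflows to all ones, so both helpers return 0:

/* Standalone illustration using the kernel's round_up()/round_down()
 * macros: with an alignment of 0, (y)-1 underflows to an all-ones
 * mask, so both helpers return 0 for any input. */
#include <stdio.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y)     ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y)   ((x) & ~__round_mask(x, y))

int main(void)
{
	unsigned long start = 0x40000000UL;	/* arbitrary 1G example */

	printf("round_up(%#lx, 0)   = %#lx\n", start, round_up(start, 0UL));
	printf("round_down(%#lx, 0) = %#lx\n", start, round_down(start, 0UL));
	/* Both print 0. */
	return 0;
}

Hence an overflowed step_size of 0 would drive next back to 0 in the bottom-up walk; the added 'step_size &&' guard makes that case fall through to next = map_end instead.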