 Documentation/arch/x86/boot.rst          |  3
 arch/x86/Kconfig                         | 10
 arch/x86/boot/compressed/ident_map_64.c  |  2
 arch/x86/boot/compressed/misc.c          | 40
 arch/x86/boot/compressed/misc.h          |  3
 arch/x86/kernel/head_64.S                | 35
 arch/x86/kernel/kexec-bzimage64.c        |  5
 arch/x86/realmode/rm/trampoline_64.S     | 33
 8 files changed, 80 insertions, 51 deletions
diff --git a/Documentation/arch/x86/boot.rst b/Documentation/arch/x86/boot.rst
index c513855a54bb..4fd492cb4970 100644
--- a/Documentation/arch/x86/boot.rst
+++ b/Documentation/arch/x86/boot.rst
@@ -878,7 +878,8 @@ Protocol:	2.10+
   address if possible.
 
   A non-relocatable kernel will unconditionally move itself and to run
-  at this address.
+  at this address. A relocatable kernel will move itself to this address if it
+  loaded below this address.
 
 ============	=======
 Field name:	init_size
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5edec175b9bf..1a33575f98af 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2114,11 +2114,11 @@ config PHYSICAL_START
 	help
 	  This gives the physical address where the kernel is loaded.
 
-	  If kernel is a not relocatable (CONFIG_RELOCATABLE=n) then
-	  bzImage will decompress itself to above physical address and
-	  run from there. Otherwise, bzImage will run from the address where
-	  it has been loaded by the boot loader and will ignore above physical
-	  address.
+	  If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then bzImage
+	  will decompress itself to above physical address and run from there.
+	  Otherwise, bzImage will run from the address where it has been loaded
+	  by the boot loader. The only exception is if it is loaded below the
+	  above physical address, in which case it will relocate itself there.
 
 	  In normal kdump cases one does not have to set/change this option
 	  as now bzImage can be compiled as a completely relocatable image
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index d040080d7edb..4a029baa5147 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -389,5 +389,5 @@ void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 void do_boot_nmi_trap(struct pt_regs *regs, unsigned long error_code)
 {
-	/* Empty handler to ignore NMI during early boot */
+	spurious_nmi_count++;
 }
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b99e08e6815b..bd6857a9f15a 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -52,6 +52,7 @@ struct port_io_ops pio_ops;
 
 memptr free_mem_ptr;
 memptr free_mem_end_ptr;
+int spurious_nmi_count;
 
 static char *vidmem;
 static int vidport;
@@ -164,21 +165,34 @@ void __putstr(const char *s)
 	outb(0xff & (pos >> 1), vidport+1);
 }
 
-void __puthex(unsigned long value)
+static noinline void __putnum(unsigned long value, unsigned int base,
+			      int mindig)
 {
-	char alpha[2] = "0";
-	int bits;
+	char buf[8*sizeof(value)+1];
+	char *p;
 
-	for (bits = sizeof(value) * 8 - 4; bits >= 0; bits -= 4) {
-		unsigned long digit = (value >> bits) & 0xf;
+	p = buf + sizeof(buf);
+	*--p = '\0';
 
-		if (digit < 0xA)
-			alpha[0] = '0' + digit;
-		else
-			alpha[0] = 'a' + (digit - 0xA);
+	while (mindig-- > 0 || value) {
+		unsigned char digit = value % base;
+		digit += (digit >= 10) ? ('a'-10) : '0';
+		*--p = digit;
 
-		__putstr(alpha);
+		value /= base;
 	}
+
+	__putstr(p);
+}
+
+void __puthex(unsigned long value)
+{
+	__putnum(value, 16, sizeof(value)*2);
+}
+
+void __putdec(unsigned long value)
+{
+	__putnum(value, 10, 1);
 }
 
 #ifdef CONFIG_X86_NEED_RELOCS
@@ -493,6 +507,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
 	/* Disable exception handling before booting the kernel */
 	cleanup_exception_handling();
 
+	if (spurious_nmi_count) {
+		error_putstr("Spurious early NMIs ignored: ");
+		error_putdec(spurious_nmi_count);
+		error_putstr("\n");
+	}
+
 	return output + entry_offset;
 }
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index bc2f0f17fb90..b353a7be380c 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -59,12 +59,15 @@ extern char _head[], _end[];
 /* misc.c */
 extern memptr free_mem_ptr;
 extern memptr free_mem_end_ptr;
+extern int spurious_nmi_count;
 void *malloc(int size);
 void free(void *where);
 void __putstr(const char *s);
 void __puthex(unsigned long value);
+void __putdec(unsigned long value);
 #define error_putstr(__x)	__putstr(__x)
 #define error_puthex(__x)	__puthex(__x)
+#define error_putdec(__x)	__putdec(__x)
 
 #ifdef CONFIG_X86_VERBOSE_BOOTUP
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d4918d03efb4..bfbac50dbe69 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -428,39 +428,10 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	movq	%r15, %rdi
 
 .Ljump_to_C_code:
-	/*
-	 * Jump to run C code and to be on a real kernel address.
-	 * Since we are running on identity-mapped space we have to jump
-	 * to the full 64bit address, this is only possible as indirect
-	 * jump. In addition we need to ensure %cs is set so we make this
-	 * a far return.
-	 *
-	 * Note: do not change to far jump indirect with 64bit offset.
-	 *
-	 * AMD does not support far jump indirect with 64bit offset.
-	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
-	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
-	 *		with the target specified by a far pointer in memory.
-	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
-	 *		with the target specified by a far pointer in memory.
-	 *
-	 * Intel64 does support 64bit offset.
-	 * Software Developer Manual Vol 2: states:
-	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
-	 *		address given in m16:16
-	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
-	 *		address given in m16:32.
-	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
-	 *		address given in m16:64.
-	 */
-	pushq	$.Lafter_lret	# put return address on stack for unwinder
 	xorl	%ebp, %ebp	# clear frame pointer
-	movq	initial_code(%rip), %rax
-	pushq	$__KERNEL_CS	# set correct cs
-	pushq	%rax		# target address in negative space
-	lretq
-.Lafter_lret:
-	ANNOTATE_NOENDBR
+	ANNOTATE_RETPOLINE_SAFE
+	callq	*initial_code(%rip)
+	ud2
 SYM_CODE_END(secondary_startup_64)
 
 #include "verify_cpu.S"
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 2a422e00ed4b..cde167b0ea92 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -503,7 +503,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	kbuf.bufsz = kernel_len - kern16_size;
 	kbuf.memsz = PAGE_ALIGN(header->init_size);
 	kbuf.buf_align = header->kernel_alignment;
-	kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+	if (header->pref_address < MIN_KERNEL_LOAD_ADDR)
+		kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+	else
+		kbuf.buf_min = header->pref_address;
 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret)
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index c9f76fae902e..14d9c7daf90f 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -37,13 +37,15 @@
 	.text
 	.code16
 
-.macro LOCK_AND_LOAD_REALMODE_ESP lock_pa=0
+.macro LOCK_AND_LOAD_REALMODE_ESP lock_pa=0 lock_rip=0
 	/*
 	 * Make sure only one CPU fiddles with the realmode stack
 	 */
 .Llock_rm\@:
 	.if \lock_pa
 	lock btsl	$0, pa_tr_lock
+	.elseif \lock_rip
+	lock btsl	$0, tr_lock(%rip)
 	.else
 	lock btsl	$0, tr_lock
 	.endif
@@ -220,6 +222,35 @@ SYM_CODE_START(trampoline_start64)
 	lidt	tr_idt(%rip)
 	lgdt	tr_gdt64(%rip)
 
+	/* Check if paging mode has to be changed */
+	movq	%cr4, %rax
+	xorl	tr_cr4(%rip), %eax
+	testl	$X86_CR4_LA57, %eax
+	jnz	.L_switch_paging
+
+	/* Paging mode is correct proceed in 64-bit mode */
+
+	LOCK_AND_LOAD_REALMODE_ESP lock_rip=1
+
+	movw	$__KERNEL_DS, %dx
+	movl	%edx, %ss
+	addl	$pa_real_mode_base, %esp
+	movl	%edx, %ds
+	movl	%edx, %es
+	movl	%edx, %fs
+	movl	%edx, %gs
+
+	movl	$pa_trampoline_pgd, %eax
+	movq	%rax, %cr3
+
+	pushq	$__KERNEL_CS
+	pushq	tr_start(%rip)
+	lretq
+.L_switch_paging:
+	/*
+	 * To switch between 4- and 5-level paging modes, it is necessary
+	 * to disable paging. This must be done in the compatibility mode.
+	 */
 	ljmpl	*tr_compat(%rip)
 SYM_CODE_END(trampoline_start64)
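
Note: the following standalone C sketch is not part of the patch; it only illustrates the digit-conversion scheme used by the new __putnum() helper in misc.c, where digits are produced least-significant first into the end of a buffer and mindig forces a minimum number of digits. The demo_putnum()/main() names and the stdio output are assumptions for illustration; the in-kernel helper prints through __putstr() instead.

	#include <stdio.h>

	/*
	 * Fill the buffer from the end: emit at least 'mindig' digits,
	 * lower-case a-f for values >= 10, NUL-terminate, then print the
	 * resulting string. Mirrors the __putnum() loop in the diff above.
	 */
	static void demo_putnum(unsigned long value, unsigned int base, int mindig)
	{
		char buf[8 * sizeof(value) + 1];
		char *p = buf + sizeof(buf);

		*--p = '\0';

		while (mindig-- > 0 || value) {
			unsigned char digit = value % base;

			digit += (digit >= 10) ? ('a' - 10) : '0';
			*--p = digit;
			value /= base;
		}

		fputs(p, stdout);
	}

	int main(void)
	{
		demo_putnum(0xdeadbeef, 16, 2 * sizeof(unsigned long));	/* 00000000deadbeef */
		putchar('\n');
		demo_putnum(42, 10, 1);					/* 42 */
		putchar('\n');
		return 0;
	}

As the diff shows, __puthex() keeps the old fixed-width hex output by requesting sizeof(value)*2 digits, while __putdec() requests a minimum of one digit so decimal values (such as the spurious NMI count) print without leading zeros.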