Diffstat (limited to 'arch/x86/entry')
 arch/x86/entry/entry_32.S                  | 43
 arch/x86/entry/entry_64.S                  | 62
 arch/x86/entry/syscalls/syscall_32.tbl     |  1
 arch/x86/entry/syscalls/syscall_64.tbl     |  1
 arch/x86/entry/thunk_32.S                  |  2
 arch/x86/entry/thunk_64.S                  |  2
 arch/x86/entry/vdso/Makefile               |  2
 arch/x86/entry/vdso/vdso-layout.lds.S      |  1
 arch/x86/entry/vdso/vdso32/system_call.S   |  2
 arch/x86/entry/vdso/vsgx.S                 |  2
 arch/x86/entry/vsyscall/vsyscall_64.c      |  2
 arch/x86/entry/vsyscall/vsyscall_emu_64.S  |  6
 12 files changed, 59 insertions(+), 67 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ccb9d32768f3..887420844066 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -268,19 +268,16 @@
 1:      popl    %ds
 2:      popl    %es
 3:      popl    %fs
-        addl    $(4 + \pop), %esp       /* pop the unused "gs" slot */
+4:      addl    $(4 + \pop), %esp       /* pop the unused "gs" slot */
         IRET_FRAME
-.pushsection .fixup, "ax"
-4:      movl    $0, (%esp)
-        jmp     1b
-5:      movl    $0, (%esp)
-        jmp     2b
-6:      movl    $0, (%esp)
-        jmp     3b
-.popsection
-        _ASM_EXTABLE(1b, 4b)
-        _ASM_EXTABLE(2b, 5b)
-        _ASM_EXTABLE(3b, 6b)
+
+        /*
+         * There is no _ASM_EXTABLE_TYPE_REG() for ASM, however since this is
+         * ASM the registers are known and we can trivially hard-code them.
+         */
+        _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_ZERO|EX_REG_DS)
+        _ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_ZERO|EX_REG_ES)
+        _ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS)
 .endm
 
 .macro RESTORE_ALL_NMI cr3_reg:req pop=0
@@ -740,7 +737,7 @@ SYM_FUNC_START(schedule_tail_wrapper)
         popl    %eax
         FRAME_END
-        ret
+        RET
 SYM_FUNC_END(schedule_tail_wrapper)
 .popsection
@@ -925,10 +922,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
         sti
         sysexit
 
-.pushsection .fixup, "ax"
-2:      movl    $0, PT_FS(%esp)
-        jmp     1b
-.popsection
+2:      movl    $0, PT_FS(%esp)
+        jmp     1b
         _ASM_EXTABLE(1b, 2b)
 
 .Lsysenter_fix_flags:
@@ -996,8 +991,7 @@ restore_all_switch_stack:
          */
         iret
 
-.section .fixup, "ax"
-SYM_CODE_START(asm_iret_error)
+.Lasm_iret_error:
         pushl   $0                              # no error code
         pushl   $iret_error
@@ -1014,9 +1008,8 @@ SYM_CODE_START(asm_iret_error)
 #endif
         jmp     handle_exception
-SYM_CODE_END(asm_iret_error)
-.previous
-        _ASM_EXTABLE(.Lirq_return, asm_iret_error)
+
+        _ASM_EXTABLE(.Lirq_return, .Lasm_iret_error)
 SYM_FUNC_END(entry_INT80_32)
 
 .macro FIXUP_ESPFIX_STACK
@@ -1248,14 +1241,14 @@ SYM_CODE_START(asm_exc_nmi)
 SYM_CODE_END(asm_exc_nmi)
 
 .pushsection .text, "ax"
-SYM_CODE_START(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_and_make_dead)
         /* Prevent any naive code from trying to unwind to our caller. */
         xorl    %ebp, %ebp
 
         movl    PER_CPU_VAR(cpu_current_top_of_stack), %esi
         leal    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
 
-        call    do_exit
+        call    make_task_dead
 1:      jmp 1b
-SYM_CODE_END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_and_make_dead)
 .popsection
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index e38a4cf795d9..466df3e50276 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
         ud2
 1:
 #endif
+#ifdef CONFIG_XEN_PV
+        ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
         POP_REGS pop_rdi=0
 
         /*
@@ -734,14 +738,10 @@ SYM_FUNC_START(asm_load_gs_index)
 2:      ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
         swapgs
         FRAME_END
-        ret
-SYM_FUNC_END(asm_load_gs_index)
-EXPORT_SYMBOL(asm_load_gs_index)
+        RET
 
-        _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
-        .section .fixup, "ax"
         /* running with kernelgs */
-SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
+.Lbad_gs:
         swapgs                                  /* switch back to user gs */
 .macro ZAP_GS
         /* This can't be a string because the preprocessor needs to see it. */
@@ -752,8 +752,11 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
         xorl    %eax, %eax
         movl    %eax, %gs
         jmp     2b
-SYM_CODE_END(.Lbad_gs)
-        .previous
+
+        _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
+
+SYM_FUNC_END(asm_load_gs_index)
+EXPORT_SYMBOL(asm_load_gs_index)
 
 #ifdef CONFIG_XEN_PV
 /*
@@ -885,11 +888,12 @@ SYM_CODE_START_LOCAL(paranoid_entry)
          * is needed here.
          */
         SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
-        ret
+        RET
 
 .Lparanoid_entry_checkgs:
         /* EBX = 1 -> kernel GSBASE active, no restore required */
         movl    $1, %ebx
+
         /*
          * The kernel-enforced convention is a negative GSBASE indicates
          * a kernel value. No SWAPGS needed on entry and exit.
@@ -897,22 +901,15 @@ SYM_CODE_START_LOCAL(paranoid_entry)
         movl    $MSR_GS_BASE, %ecx
         rdmsr
         testl   %edx, %edx
-        jns     .Lparanoid_entry_swapgs
-        ret
+        js      .Lparanoid_kernel_gsbase
 
-.Lparanoid_entry_swapgs:
+        /* EBX = 0 -> SWAPGS required on exit */
+        xorl    %ebx, %ebx
         swapgs
+.Lparanoid_kernel_gsbase:
 
-        /*
-         * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
-         * unconditional CR3 write, even in the PTI case. So do an lfence
-         * to prevent GS speculation, regardless of whether PTI is enabled.
-         */
         FENCE_SWAPGS_KERNEL_ENTRY
-
-        /* EBX = 0 -> SWAPGS required on exit */
-        xorl    %ebx, %ebx
-        ret
+        RET
 SYM_CODE_END(paranoid_entry)
 
 /*
@@ -991,12 +988,7 @@ SYM_CODE_START_LOCAL(error_entry)
         movq    %rax, %rsp                      /* switch stack */
         ENCODE_FRAME_POINTER
         pushq   %r12
-        ret
-
-.Lerror_entry_done_lfence:
-        FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
-        ret
+        RET
 
         /*
          * There are two places in the kernel that can potentially fault with
@@ -1020,8 +1012,14 @@ SYM_CODE_START_LOCAL(error_entry)
          * .Lgs_change's error handler with kernel gsbase.
          */
         SWAPGS
-        FENCE_SWAPGS_USER_ENTRY
-        jmp     .Lerror_entry_done
+
+        /*
+         * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+         * kernel or user gsbase.
+         */
+.Lerror_entry_done_lfence:
+        FENCE_SWAPGS_KERNEL_ENTRY
+        RET
 
 .Lbstep_iret:
         /* Fix truncated RIP */
@@ -1429,7 +1427,7 @@ SYM_CODE_END(ignore_sysret)
 #endif
 
 .pushsection .text, "ax"
-SYM_CODE_START(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_and_make_dead)
         UNWIND_HINT_FUNC
         /* Prevent any naive code from trying to unwind to our caller. */
         xorl    %ebp, %ebp
@@ -1438,6 +1436,6 @@ SYM_CODE_START(rewind_stack_do_exit)
         leaq    -PTREGS_SIZE(%rax), %rsp
         UNWIND_HINT_REGS
 
-        call    do_exit
-SYM_CODE_END(rewind_stack_do_exit)
+        call    make_task_dead
+SYM_CODE_END(rewind_stack_and_make_dead)
 .popsection
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 7e25543693de..320480a8db4f 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -454,3 +454,4 @@
 447     i386    memfd_secret            sys_memfd_secret
 448     i386    process_mrelease        sys_process_mrelease
 449     i386    futex_waitv             sys_futex_waitv
+450     i386    set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index fe8f8dd157b4..c84d12608cd2 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -371,6 +371,7 @@
 447     common  memfd_secret            sys_memfd_secret
 448     common  process_mrelease        sys_process_mrelease
 449     common  futex_waitv             sys_futex_waitv
+450     common  set_mempolicy_home_node sys_set_mempolicy_home_node
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index f1f96d4d8cd6..7591bab060f7 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -24,7 +24,7 @@ SYM_CODE_START_NOALIGN(\name)
         popl    %edx
         popl    %ecx
         popl    %eax
-        ret
+        RET
         _ASM_NOKPROBE(\name)
 SYM_CODE_END(\name)
 .endm
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 496b11ec469d..505b488fcc65 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -50,7 +50,7 @@ SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
         popq    %rsi
         popq    %rdi
         popq    %rbp
-        ret
+        RET
         _ASM_NOKPROBE(__thunk_restore)
 SYM_CODE_END(__thunk_restore)
 #endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index a2dddcc189f6..693f8b9031fb 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -172,7 +172,7 @@ $(obj)/vdso32.so.dbg: $(obj)/vdso32/vdso32.lds $(vobjs32) FORCE
 # The DSO images are built using a special linker script.
 #
 quiet_cmd_vdso = VDSO    $@
-      cmd_vdso = $(LD) -nostdlib -o $@ \
+      cmd_vdso = $(LD) -o $@ \
                        $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
                        -T $(filter %.lds,$^) $(filter %.o,$^) && \
                  sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index dc8da7695859..bafa73f09e92 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -77,7 +77,6 @@ SECTIONS
 
         .text           : {
                 *(.text*)
-                *(.fixup)
         }                                               :text   =0x90909090,
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 6ddd7a937b3e..d33c6513fd2c 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -78,7 +78,7 @@ SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
         popl    %ecx
         CFI_RESTORE     ecx
         CFI_ADJUST_CFA_OFFSET   -4
-        ret
+        RET
         CFI_ENDPROC
 
         .size __kernel_vsyscall,.-__kernel_vsyscall
diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S
index 99dafac992e2..d77d278ee9dd 100644
--- a/arch/x86/entry/vdso/vsgx.S
+++ b/arch/x86/entry/vdso/vsgx.S
@@ -81,7 +81,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
         pop     %rbx
         leave
         .cfi_def_cfa    %rsp, 8
-        ret
+        RET
 
         /* The out-of-line code runs with the pre-leave stack frame. */
         .cfi_def_cfa    %rbp, 16
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 0b6b277ee050..fd2ee9408e91 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -226,7 +226,7 @@ bool emulate_vsyscall(unsigned long error_code,
         if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
                 warn_bad_vsyscall(KERN_DEBUG, regs,
                                   "seccomp tried to change syscall nr or ip");
-                force_fatal_sig(SIGSYS);
+                force_exit_sig(SIGSYS);
                 return true;
         }
         regs->orig_ax = -1;
diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
index 2e203f3a25a7..15e35159ebb6 100644
--- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S
+++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
@@ -19,17 +19,17 @@ __vsyscall_page:
 
         mov $__NR_gettimeofday, %rax
         syscall
-        ret
+        RET
 
         .balign 1024, 0xcc
         mov $__NR_time, %rax
         syscall
-        ret
+        RET
 
         .balign 1024, 0xcc
         mov $__NR_getcpu, %rax
         syscall
-        ret
+        RET
 
         .balign 4096, 0xcc
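
The two syscall-table hunks above wire up syscall number 450, set_mempolicy_home_node, for both 32-bit and 64-bit x86. Below is a minimal userspace sketch of invoking it, assuming a kernel carrying this change and no libc wrapper yet; the number 450 comes from the tables above, and the (start, len, home_node, flags) argument layout follows the kernel's sys_set_mempolicy_home_node definition, so verify both against your own headers.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Fallback definition; sufficiently new kernel headers provide this. */
#ifndef __NR_set_mempolicy_home_node
#define __NR_set_mempolicy_home_node 450
#endif

int main(void)
{
        /*
         * Illustrative arguments only: a real caller passes a page-aligned
         * range that already carries a memory policy (applied via mbind(2))
         * plus the desired home NUMA node; flags must currently be 0.
         */
        long ret = syscall(__NR_set_mempolicy_home_node,
                           0UL /* start */, 0UL /* len */,
                           0UL /* home_node */, 0UL /* flags */);
        if (ret == -1)
                perror("set_mempolicy_home_node");
        return 0;
}

On a kernel without this change the raw call simply fails with ENOSYS, so probing for the new syscall this way is safe.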