author		Jiri Slaby <[email protected]>		2019-10-11 13:51:04 +0200
committer	Borislav Petkov <[email protected]>	2019-10-18 11:58:33 +0200
commit		6dcc5627f6aec4cb1d1494d06a48d8061db06a04
tree		e28f50a9359cf1deda2d28e8cfbc9f15f9310d22 /arch/x86/xen/xen-asm.S
parent		bc7b11c04ee9c9b0451ebf85bf64e0de69fdbb17
x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*
These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START and replace their ENDPROCs
with SYM_FUNC_END.
Make sure ENTRY/ENDPROC are no longer defined on X86_64, given these
were the last users.
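
For illustration, the conversion pattern applied throughout looks like
this (a minimal sketch using a hypothetical my_func, not part of this
patch):

  /* old-style annotation of a global assembly function */
  ENTRY(my_func)
  	ret
  ENDPROC(my_func)

  /* equivalent new-style annotation */
  SYM_FUNC_START(my_func)
  	ret
  SYM_FUNC_END(my_func)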
Signed-off-by: Jiri Slaby <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Rafael J. Wysocki <[email protected]> [hibernate]
Reviewed-by: Boris Ostrovsky <[email protected]> [xen bits]
Acked-by: Herbert Xu <[email protected]> [crypto]
Cc: Allison Randal <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Andy Shevchenko <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Armijn Hemel <[email protected]>
Cc: Cao jin <[email protected]>
Cc: Darren Hart <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Enrico Weigelt <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Herbert Xu <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jim Mattson <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Kate Stewart <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: kvm ML <[email protected]>
Cc: Len Brown <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: linux-efi <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: Mark Rutland <[email protected]>
Cc: Matt Fleming <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Pavel Machek <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: "Radim Krčmář" <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: Stefano Stabellini <[email protected]>
Cc: "Steven Rostedt (VMware)" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vitaly Kuznetsov <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Wei Huang <[email protected]>
Cc: x86-ml <[email protected]>
Cc: [email protected]
Cc: Xiaoyao Li <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Diffstat (limited to 'arch/x86/xen/xen-asm.S')
-rw-r--r--	arch/x86/xen/xen-asm.S	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index be104eef80be..508fe204520b 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -19,7 +19,7 @@
  * event status with one and operation. If there are pending events,
  * then enter the hypervisor to get them handled.
  */
-ENTRY(xen_irq_enable_direct)
+SYM_FUNC_START(xen_irq_enable_direct)
 	FRAME_BEGIN
 	/* Unmask events */
 	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
@@ -38,17 +38,17 @@ ENTRY(xen_irq_enable_direct)
 1:
 	FRAME_END
 	ret
-	ENDPROC(xen_irq_enable_direct)
+SYM_FUNC_END(xen_irq_enable_direct)
 
 
 /*
  * Disabling events is simply a matter of making the event mask
  * non-zero.
  */
-ENTRY(xen_irq_disable_direct)
+SYM_FUNC_START(xen_irq_disable_direct)
 	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	ret
-ENDPROC(xen_irq_disable_direct)
+SYM_FUNC_END(xen_irq_disable_direct)
 
 /*
  * (xen_)save_fl is used to get the current interrupt enable status.
@@ -59,12 +59,12 @@ ENDPROC(xen_irq_disable_direct)
  * undefined. We need to toggle the state of the bit, because Xen and
  * x86 use opposite senses (mask vs enable).
  */
-ENTRY(xen_save_fl_direct)
+SYM_FUNC_START(xen_save_fl_direct)
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	setz %ah
 	addb %ah, %ah
 	ret
-	ENDPROC(xen_save_fl_direct)
+SYM_FUNC_END(xen_save_fl_direct)
 
 
 /*
@@ -74,7 +74,7 @@ ENTRY(xen_save_fl_direct)
  * interrupt mask state, it checks for unmasked pending events and
  * enters the hypervisor to get them delivered if so.
  */
-ENTRY(xen_restore_fl_direct)
+SYM_FUNC_START(xen_restore_fl_direct)
 	FRAME_BEGIN
 #ifdef CONFIG_X86_64
 	testw $X86_EFLAGS_IF, %di
@@ -95,14 +95,14 @@ ENTRY(xen_restore_fl_direct)
 1:
 	FRAME_END
 	ret
-	ENDPROC(xen_restore_fl_direct)
+SYM_FUNC_END(xen_restore_fl_direct)
 
 
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
  */
-ENTRY(check_events)
+SYM_FUNC_START(check_events)
 	FRAME_BEGIN
 #ifdef CONFIG_X86_32
 	push %eax
@@ -135,19 +135,19 @@ ENTRY(check_events)
 #endif
 	FRAME_END
 	ret
-ENDPROC(check_events)
+SYM_FUNC_END(check_events)
 
-ENTRY(xen_read_cr2)
+SYM_FUNC_START(xen_read_cr2)
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
 	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
 	FRAME_END
 	ret
-	ENDPROC(xen_read_cr2);
+SYM_FUNC_END(xen_read_cr2);
 
-ENTRY(xen_read_cr2_direct)
+SYM_FUNC_START(xen_read_cr2_direct)
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
 	FRAME_END
 	ret
-	ENDPROC(xen_read_cr2_direct);
+SYM_FUNC_END(xen_read_cr2_direct);