Diffstat (limited to 'arch/arm64/include/asm/processor.h')
 arch/arm64/include/asm/processor.h | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 79657ad91397..6b0d4dff5012 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -24,14 +24,15 @@
 #define KERNEL_DS	UL(-1)
 #define USER_DS		(TASK_SIZE_64 - 1)
 
-#ifndef __ASSEMBLY__
-
 /*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
  */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+#define NET_IP_ALIGN	0
 
+#ifndef __ASSEMBLY__
 #ifdef __KERNEL__
 
 #include <linux/build_bug.h>
@@ -174,6 +175,10 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 {
 	start_thread_common(regs, pc);
 	regs->pstate = PSR_MODE_EL0t;
+
+	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+		regs->pstate |= PSR_SSBS_BIT;
+
 	regs->sp = sp;
 }
 
@@ -190,6 +195,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
+	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+		regs->pstate |= PSR_AA32_SSBS_BIT;
+
 	regs->compat_sp = sp;
 }
 #endif
@@ -244,10 +252,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #endif
 
-void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
-
 extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
 extern void __init minsigstksz_setup(void);
 
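
For context, the new NET_IP_ALIGN definition feeds the usual driver receive-path pattern sketched below. This is an illustrative sketch only, not part of the patch: example_rx_alloc() is a hypothetical helper, and the point is simply that with NET_IP_ALIGN defined to 0 the skb_reserve() call becomes a no-op on arm64, so the IP header stays 2 bytes off a 4-byte boundary after the 14-byte Ethernet header, which the comment above argues is fine because unaligned loads are cheap.

/* Illustrative sketch only -- not part of this patch. example_rx_alloc() is a
 * hypothetical driver helper showing where NET_IP_ALIGN is normally consumed. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_alloc(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb;

	/* Allocate room for the frame plus the (possibly zero) alignment pad. */
	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	/* Shift the data pointer so the IP header lands 4-byte aligned on
	 * architectures that ask for it; a no-op when NET_IP_ALIGN is 0. */
	skb_reserve(skb, NET_IP_ALIGN);

	return skb;
}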
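
The two SSBS hunks mean that, unless the SSBD mitigation has been forced on (ARM64_SSBD_FORCE_ENABLE), a freshly exec'd task starts with PSTATE.SSBS set, i.e. with Speculative Store Bypass permitted. A minimal userspace sketch of how that per-task state is normally observed, using the generic speculation-control prctl() with standard <linux/prctl.h> constants (nothing below is introduced by this patch, and the tie-in to SSBS is an assumption about how the arm64 SSBD code reports its state):

/* Illustrative sketch only -- queries the per-task Speculative Store Bypass
 * state through the generic speculation-control prctl(). */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("prctl(PR_GET_SPECULATION_CTRL)");
		return 1;
	}

	/* PR_SPEC_ENABLE in the returned mask means store bypass is permitted,
	 * matching the SSBS bit being set when SSBD is not forced on. */
	printf("PR_SPEC_STORE_BYPASS state: %#lx\n", state);
	return 0;
}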