| author | Ingo Molnar <[email protected]> | 2020-02-24 11:36:09 +0100 |
|---|---|---|
| committer | Ingo Molnar <[email protected]> | 2020-02-24 11:36:09 +0100 |
| commit | 546121b65f47384e11ec1fa2e55449fc9f4846b2 | |
| tree | 8f18470ec7c0c77b0f48eb1b2338e591b0b0aaff /arch/arm64/kernel/process.c | |
| parent | 000619680c3714020ce9db17eef6a4a7ce2dc28b | |
| parent | f8788d86ab28f61f7b46eb6be375f8a726783636 | |
Merge tag 'v5.6-rc3' into sched/core, to pick up fixes and dependent patches
Signed-off-by: Ingo Molnar <[email protected]>
Diffstat (limited to 'arch/arm64/kernel/process.c')
| -rw-r--r-- | arch/arm64/kernel/process.c | 25 |
1 file changed, 15 insertions, 10 deletions
```diff
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 71f788cd2b18..00626057a384 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 asmlinkage void ret_from_fork(void) asm("ret_from_fork");
 
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
-		unsigned long stk_sz, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+		unsigned long stk_sz, struct task_struct *p, unsigned long tls)
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 
@@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		}
 
 		/*
-		 * If a TLS pointer was passed to clone (4th argument), use it
-		 * for the new thread.
+		 * If a TLS pointer was passed to clone, use it for the new
+		 * thread.
 		 */
 		if (clone_flags & CLONE_SETTLS)
-			p->thread.uw.tp_value = childregs->regs[3];
+			p->thread.uw.tp_value = tls;
 	} else {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->pstate = PSR_MODE_EL1h;
@@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
 	if (unlikely(next->flags & PF_KTHREAD))
 		return;
 
+	/*
+	 * If all CPUs implement the SSBS extension, then we just need to
+	 * context-switch the PSTATE field.
+	 */
+	if (cpu_have_feature(cpu_feature(SSBS)))
+		return;
+
 	/* If the mitigation is enabled, then we leave SSBS clear. */
 	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
 	    test_tsk_thread_flag(next, TIF_SSBD))
@@ -608,8 +615,6 @@ long get_tagged_addr_ctrl(void)
  * only prevents the tagged address ABI enabling via prctl() and does not
  * disable it for tasks that already opted in to the relaxed ABI.
  */
-static int zero;
-static int one = 1;
 
 static struct ctl_table tagged_addr_sysctl_table[] = {
 	{
@@ -618,8 +623,8 @@ static struct ctl_table tagged_addr_sysctl_table[] = {
 		.data		= &tagged_addr_disabled,
 		.maxlen		= sizeof(int),
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &one,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
 	},
 	{ }
 };
@@ -646,6 +651,6 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void)
 	 * Only allow a task to be preempted once cpufeatures have been
 	 * enabled.
 	 */
-	if (static_branch_likely(&arm64_const_caps_ready))
+	if (system_capabilities_finalized())
 		preempt_schedule_irq();
 }
```
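The most visible change above is the copy_thread() to copy_thread_tls() conversion: the TLS pointer for the new thread now arrives as an explicit `tls` argument instead of being re-read from `childregs->regs[3]`. Reading it back from the saved register frame is only correct when the caller is the legacy clone() syscall with the TLS value still sitting in the fourth argument register; clone3(), for instance, takes its arguments in a userspace struct. As a rough illustration of the failure mode, here is a standalone userspace sketch; `fake_regs`, `tls_from_regs()` and `tls_from_arg()` are made-up names for this example, not kernel APIs.

```c
/*
 * Standalone sketch (not kernel code) of why an explicit tls parameter
 * is more robust than re-reading the value from a saved register frame.
 */
#include <stdio.h>

struct fake_regs {
	unsigned long regs[8];	/* stand-in for saved syscall arguments */
};

/* Old style: assumes the TLS pointer is still in argument register 3. */
static unsigned long tls_from_regs(const struct fake_regs *childregs)
{
	return childregs->regs[3];
}

/* New style: the caller passes the TLS pointer explicitly. */
static unsigned long tls_from_arg(unsigned long tls)
{
	return tls;
}

int main(void)
{
	unsigned long wanted_tls = 0x1000;

	/* A clone()-style caller really does have TLS in regs[3]. */
	struct fake_regs clone_frame = { .regs = { 0, 0, 0, 0x1000 } };

	/*
	 * A clone3()-style caller passes its arguments in a struct, so
	 * regs[3] holds something unrelated.
	 */
	struct fake_regs clone3_frame = { .regs = { 0, 0, 0, 0xdeadbeef } };

	printf("clone-style,  from regs: %#lx\n", tls_from_regs(&clone_frame));
	printf("clone3-style, from regs: %#lx (wrong)\n",
	       tls_from_regs(&clone3_frame));
	printf("clone3-style, explicit:  %#lx\n", tls_from_arg(wanted_tls));
	return 0;
}
```

The kernel-side fix follows the same idea: the generic fork path already knows the TLS value it was given, so handing it to copy_thread_tls() directly removes the hidden assumption about register layout.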