author | Catalin Marinas <catalin.marinas@arm.com> | 2021-04-15 14:00:48 +0100
committer | Catalin Marinas <catalin.marinas@arm.com> | 2021-04-15 14:00:48 +0100
commit | a27a8816568964fcef62a3ae5f9d2228ec1ebc68 (patch)
tree | 7197c94bcf5b600bb43e7aa70ba2b05a7fb82cd1 /arch/arm64/kernel
parent | 604df13d7aadae6902d3b7f03a35bb21d887f0cf (diff)
parent | b90e483938ce387c256e03fb144f82f64551847b (diff)
Merge branch 'for-next/pac-set-get-enabled-keys' into for-next/core
* for-next/pac-set-get-enabled-keys:
: Introduce arm64 prctl(PR_PAC_{SET,GET}_ENABLED_KEYS).
arm64: pac: Optimize kernel entry/exit key installation code paths
arm64: Introduce prctl(PR_PAC_{SET,GET}_ENABLED_KEYS)
arm64: mte: make the per-task SCTLR_EL1 field usable elsewhere
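
The prctl() pair introduced by this branch takes a mask of keys to act on and a mask of keys to leave enabled. As a rough userspace sketch (not part of this commit; the fallback #defines below only mirror the uapi values added by the series, for builds whose headers do not yet carry them):

```c
#include <stdio.h>
#include <sys/prctl.h>

/* Fallbacks for headers that predate this series; values mirror the uapi additions. */
#ifndef PR_PAC_SET_ENABLED_KEYS
#define PR_PAC_SET_ENABLED_KEYS	60
#define PR_PAC_GET_ENABLED_KEYS	61
#endif
#ifndef PR_PAC_APIBKEY
#define PR_PAC_APIBKEY		(1UL << 1)
#endif

int main(void)
{
	long enabled;

	/* Disable address authentication with the IB key for this thread only. */
	if (prctl(PR_PAC_SET_ENABLED_KEYS, PR_PAC_APIBKEY, 0, 0, 0))
		perror("PR_PAC_SET_ENABLED_KEYS");

	/* The GET variant returns the bitmask of currently enabled keys. */
	enabled = prctl(PR_PAC_GET_ENABLED_KEYS, 0, 0, 0, 0);
	if (enabled < 0)
		perror("PR_PAC_GET_ENABLED_KEYS");
	else
		printf("enabled PAC keys: 0x%lx\n", enabled);

	return 0;
}
```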
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r-- | arch/arm64/kernel/asm-offsets.c | 5
-rw-r--r-- | arch/arm64/kernel/entry.S | 46
-rw-r--r-- | arch/arm64/kernel/mte.c | 51
-rw-r--r-- | arch/arm64/kernel/pointer_auth.c | 63
-rw-r--r-- | arch/arm64/kernel/process.c | 33
-rw-r--r-- | arch/arm64/kernel/ptrace.c | 41
-rw-r--r-- | arch/arm64/kernel/suspend.c | 3
7 files changed, 192 insertions(+), 50 deletions(-)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index cc7267a24bf7..e797603e55b7 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -43,6 +43,7 @@ int main(void)
 #endif
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
+  DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user));
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
   DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
@@ -149,10 +150,6 @@ int main(void)
 #endif
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
-  DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib));
-  DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
-  DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
-  DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
   DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
   BLANK();
 #endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a45b4ebbfe7d..2df32a516ffe 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -247,7 +247,29 @@ alternative_else_nop_endif
 	check_mte_async_tcf x19, x22
 	apply_ssbd 1, x22, x23

-	ptrauth_keys_install_kernel tsk, x20, x22, x23
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+	/*
+	 * Enable IA for in-kernel PAC if the task had it disabled. Although
+	 * this could be implemented with an unconditional MRS which would avoid
+	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+	 *
+	 * Install the kernel IA key only if IA was enabled in the task. If IA
+	 * was disabled on kernel exit then we would have left the kernel IA
+	 * installed so there is no need to install it again.
+	 */
+	ldr	x0, [tsk, THREAD_SCTLR_USER]
+	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+	b	2f
+1:
+	mrs	x0, sctlr_el1
+	orr	x0, x0, SCTLR_ELx_ENIA
+	msr	sctlr_el1, x0
+2:
+	isb
+alternative_else_nop_endif
+#endif

 	mte_set_kernel_gcr x22, x23

@@ -351,8 +373,26 @@ alternative_else_nop_endif
 3:
 	scs_save tsk, x0

-	/* No kernel C function calls after this as user keys are set. */
-	ptrauth_keys_install_user tsk, x0, x1, x2
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+	/*
+	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
+	 * alternatively install the user's IA. All other per-task keys and
+	 * SCTLR bits were updated on task switch.
+	 *
+	 * No kernel C function calls after this.
+	 */
+	ldr	x0, [tsk, THREAD_SCTLR_USER]
+	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	__ptrauth_keys_install_user tsk, x0, x1, x2
+	b	2f
+1:
+	mrs	x0, sctlr_el1
+	bic	x0, x0, SCTLR_ELx_ENIA
+	msr	sctlr_el1, x0
+2:
+alternative_else_nop_endif
+#endif

 	mte_set_user_gcr tsk, x0, x1

diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 820bad94870e..125a10e413e9 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -185,26 +185,6 @@ void mte_check_tfsr_el1(void)
 }
 #endif

-static void update_sctlr_el1_tcf0(u64 tcf0)
-{
-	/* ISB required for the kernel uaccess routines */
-	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
-	isb();
-}
-
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
-	/*
-	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
-	 * optimisation. Disable preemption so that it does not see
-	 * the variable update before the SCTLR_EL1.TCF0 one.
-	 */
-	preempt_disable();
-	current->thread.sctlr_tcf0 = tcf0;
-	update_sctlr_el1_tcf0(tcf0);
-	preempt_enable();
-}
-
 static void update_gcr_el1_excl(u64 excl)
 {
@@ -227,7 +207,7 @@ static void set_gcr_el1_excl(u64 excl)
 	 */
 }

-void flush_mte_state(void)
+void mte_thread_init_user(void)
 {
 	if (!system_supports_mte())
 		return;
@@ -237,31 +217,22 @@ void flush_mte_state(void)
 	write_sysreg_s(0, SYS_TFSRE0_EL1);
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
 	/* disable tag checking */
-	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+	set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+			   SCTLR_EL1_TCF0_NONE);
 	/* reset tag generation mask */
 	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }

 void mte_thread_switch(struct task_struct *next)
 {
-	if (!system_supports_mte())
-		return;
-
-	/* avoid expensive SCTLR_EL1 accesses if no change */
-	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
-		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-	else
-		isb();
-
 	/*
 	 * Check if an async tag exception occurred at EL1.
 	 *
 	 * Note: On the context switch path we rely on the dsb() present
 	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
 	 * are synchronized before this point.
-	 * isb() above is required for the same reason.
-	 *
 	 */
+	isb();
 	mte_check_tfsr_el1();
 }
@@ -291,7 +262,7 @@ void mte_suspend_exit(void)

 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
-	u64 tcf0;
+	u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
 	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
 		       SYS_GCR_EL1_EXCL_MASK;

@@ -300,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 	switch (arg & PR_MTE_TCF_MASK) {
 	case PR_MTE_TCF_NONE:
-		tcf0 = SCTLR_EL1_TCF0_NONE;
+		sctlr |= SCTLR_EL1_TCF0_NONE;
 		break;
 	case PR_MTE_TCF_SYNC:
-		tcf0 = SCTLR_EL1_TCF0_SYNC;
+		sctlr |= SCTLR_EL1_TCF0_SYNC;
 		break;
 	case PR_MTE_TCF_ASYNC:
-		tcf0 = SCTLR_EL1_TCF0_ASYNC;
+		sctlr |= SCTLR_EL1_TCF0_ASYNC;
 		break;
 	default:
 		return -EINVAL;
 	}

 	if (task != current) {
-		task->thread.sctlr_tcf0 = tcf0;
+		task->thread.sctlr_user = sctlr;
 		task->thread.gcr_user_excl = gcr_excl;
 	} else {
-		set_sctlr_el1_tcf0(tcf0);
+		set_task_sctlr_el1(sctlr);
 		set_gcr_el1_excl(gcr_excl);
 	}

@@ -333,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task)

 	ret = incl << PR_MTE_TAG_SHIFT;

-	switch (task->thread.sctlr_tcf0) {
+	switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
 	case SCTLR_EL1_TCF0_NONE:
 		ret |= PR_MTE_TCF_NONE;
 		break;
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index adb955fd9bdd..60901ab0a7fe 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -43,6 +43,69 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 		get_random_bytes(&keys->apdb, sizeof(keys->apdb));
 	if (arg & PR_PAC_APGAKEY)
 		get_random_bytes(&keys->apga, sizeof(keys->apga));
+	ptrauth_keys_install_user(keys);

 	return 0;
 }
+
+static u64 arg_to_enxx_mask(unsigned long arg)
+{
+	u64 sctlr_enxx_mask = 0;
+
+	WARN_ON(arg & ~PR_PAC_ENABLED_KEYS_MASK);
+	if (arg & PR_PAC_APIAKEY)
+		sctlr_enxx_mask |= SCTLR_ELx_ENIA;
+	if (arg & PR_PAC_APIBKEY)
+		sctlr_enxx_mask |= SCTLR_ELx_ENIB;
+	if (arg & PR_PAC_APDAKEY)
+		sctlr_enxx_mask |= SCTLR_ELx_ENDA;
+	if (arg & PR_PAC_APDBKEY)
+		sctlr_enxx_mask |= SCTLR_ELx_ENDB;
+	return sctlr_enxx_mask;
+}
+
+int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
+			     unsigned long enabled)
+{
+	u64 sctlr = tsk->thread.sctlr_user;
+
+	if (!system_supports_address_auth())
+		return -EINVAL;
+
+	if (is_compat_thread(task_thread_info(tsk)))
+		return -EINVAL;
+
+	if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
+		return -EINVAL;
+
+	sctlr &= ~arg_to_enxx_mask(keys);
+	sctlr |= arg_to_enxx_mask(enabled);
+	if (tsk == current)
+		set_task_sctlr_el1(sctlr);
+	else
+		tsk->thread.sctlr_user = sctlr;
+
+	return 0;
+}
+
+int ptrauth_get_enabled_keys(struct task_struct *tsk)
+{
+	int retval = 0;
+
+	if (!system_supports_address_auth())
+		return -EINVAL;
+
+	if (is_compat_thread(task_thread_info(tsk)))
+		return -EINVAL;
+
+	if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA)
+		retval |= PR_PAC_APIAKEY;
+	if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB)
+		retval |= PR_PAC_APIBKEY;
+	if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA)
+		retval |= PR_PAC_APDAKEY;
+	if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB)
+		retval |= PR_PAC_APDBKEY;
+
+	return retval;
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a29028d3d46e..6aa053462400 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -339,7 +339,6 @@ void flush_thread(void)
 	tls_thread_flush();
 	flush_ptrace_hw_breakpoint(current);
 	flush_tagged_addr_state();
-	flush_mte_state();
 }

 void release_thread(struct task_struct *dead_task)
@@ -529,6 +528,31 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	write_sysreg(val, cntkctl_el1);
 }

+static void update_sctlr_el1(u64 sctlr)
+{
+	/*
+	 * EnIA must not be cleared while in the kernel as this is necessary for
+	 * in-kernel PAC. It will be cleared on kernel exit if needed.
+	 */
+	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
+
+	/* ISB required for the kernel uaccess routines when setting TCF0. */
+	isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+	/*
+	 * __switch_to() checks current->thread.sctlr as an
+	 * optimisation. Disable preemption so that it does not see
+	 * the variable update before the SCTLR_EL1 one.
+	 */
+	preempt_disable();
+	current->thread.sctlr_user = sctlr;
+	update_sctlr_el1(sctlr);
+	preempt_enable();
+}
+
 /*
  * Thread switching.
  */
@@ -544,6 +568,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
+	ptrauth_thread_switch_user(next);

 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -559,6 +584,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	 * registers.
 	 */
 	mte_thread_switch(next);
+	/* avoid expensive SCTLR_EL1 accesses if no change */
+	if (prev->thread.sctlr_user != next->thread.sctlr_user)
+		update_sctlr_el1(next->thread.sctlr_user);

 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
@@ -608,7 +636,8 @@ void arch_setup_new_exec(void)
 {
 	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

-	ptrauth_thread_init_user(current);
+	ptrauth_thread_init_user();
+	mte_thread_init_user();

 	if (task_spec_ssb_noexec(current)) {
 		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 170f42fd6101..eb2f73939b7b 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -909,6 +909,38 @@ static int pac_mask_get(struct task_struct *target,
 	return membuf_write(&to, &uregs, sizeof(uregs));
 }

+static int pac_enabled_keys_get(struct task_struct *target,
+				const struct user_regset *regset,
+				struct membuf to)
+{
+	long enabled_keys = ptrauth_get_enabled_keys(target);
+
+	if (IS_ERR_VALUE(enabled_keys))
+		return enabled_keys;
+
+	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
+}
+
+static int pac_enabled_keys_set(struct task_struct *target,
+				const struct user_regset *regset,
+				unsigned int pos, unsigned int count,
+				const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	long enabled_keys = ptrauth_get_enabled_keys(target);
+
+	if (IS_ERR_VALUE(enabled_keys))
+		return enabled_keys;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
+				 sizeof(long));
+	if (ret)
+		return ret;
+
+	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
+					enabled_keys);
+}
+
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
 {
@@ -1074,6 +1106,7 @@ enum aarch64_regset {
 #endif
 #ifdef CONFIG_ARM64_PTR_AUTH
 	REGSET_PAC_MASK,
+	REGSET_PAC_ENABLED_KEYS,
 #ifdef CONFIG_CHECKPOINT_RESTORE
 	REGSET_PACA_KEYS,
 	REGSET_PACG_KEYS,
@@ -1160,6 +1193,14 @@ static const struct user_regset aarch64_regsets[] = {
 		.regset_get = pac_mask_get,
 		/* this cannot be set dynamically */
 	},
+	[REGSET_PAC_ENABLED_KEYS] = {
+		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
+		.n = 1,
+		.size = sizeof(long),
+		.align = sizeof(long),
+		.regset_get = pac_enabled_keys_get,
+		.set = pac_enabled_keys_set,
+	},
 #ifdef CONFIG_CHECKPOINT_RESTORE
 	[REGSET_PACA_KEYS] = {
 		.core_note_type = NT_ARM_PACA_KEYS,
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 6fdc8292b4f5..e3f72df9509d 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -74,8 +74,9 @@ void notrace __cpu_suspend_exit(void)
 	 */
 	spectre_v4_enable_mitigation(NULL);

-	/* Restore additional MTE-specific configuration */
+	/* Restore additional feature-specific configuration */
 	mte_suspend_exit();
+	ptrauth_suspend_exit();
 }

 /*
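
For completeness, a debugger-side sketch of consuming the NT_ARM_PAC_ENABLED_KEYS regset registered in the ptrace.c hunk above. This is illustrative only: the helper name is made up, the tracee is assumed to already be ptrace-stopped, and the fallback note value mirrors the uapi elf.h addition from this series.

```c
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Fallback for headers that predate this series; value mirrors the uapi elf.h addition. */
#ifndef NT_ARM_PAC_ENABLED_KEYS
#define NT_ARM_PAC_ENABLED_KEYS	0x40a
#endif

/* Read the tracee's enabled-key bitmask; returns -1 on error. */
static long read_pac_enabled_keys(pid_t pid)
{
	long keys = 0;
	/* One element of sizeof(long), matching the regset definition above. */
	struct iovec iov = { .iov_base = &keys, .iov_len = sizeof(keys) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_ARM_PAC_ENABLED_KEYS, &iov))
		return -1;

	return keys;
}
```

The bits returned use the same PR_PAC_AP*KEY encoding as the prctl() interface, so a bitmask of 0xf means all four address keys are enabled for the traced thread.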