| author | Mark Brown <[email protected]> | 2021-06-09 11:52:35 +0100 |
|---|---|---|
| committer | Mark Brown <[email protected]> | 2021-06-09 11:52:35 +0100 |
| commit | c441bfb5f2866de71e092c1b9d866a65978dfe1a | |
| tree | 51aca8f1e4b2de5819d4b83a4646f8448de75546 | |
| parent | bcc0f0c078771e983a7e602eb14efa02f811445f | |
| parent | c4681547bcce777daf576925a966ffa824edd09d | |
Merge tag 'v5.13-rc3' into asoc-5.13
Linux 5.13-rc3
Diffstat (limited to 'include/linux/randomize_kstack.h')
| -rw-r--r-- | include/linux/randomize_kstack.h | 54 |
1 file changed, 54 insertions, 0 deletions
```diff
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
new file mode 100644
index 000000000000..bebc911161b6
--- /dev/null
+++ b/include/linux/randomize_kstack.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RANDOMIZE_KSTACK_H
+#define _LINUX_RANDOMIZE_KSTACK_H
+
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/percpu-defs.h>
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+			 randomize_kstack_offset);
+DECLARE_PER_CPU(u32, kstack_offset);
+
+/*
+ * Do not use this anywhere else in the kernel. This is used here because
+ * it provides an arch-agnostic way to grow the stack with correct
+ * alignment. Also, since this use is being explicitly masked to a max of
+ * 10 bits, stack-clash style attacks are unlikely. For more details see
+ * "VLAs" in Documentation/process/deprecated.rst
+ */
+void *__builtin_alloca(size_t size);
+/*
+ * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
+ * "VLA" from being unbounded (see above). 10 bits leaves enough room for
+ * per-arch offset masks to reduce entropy (by removing higher bits, since
+ * high entropy may overly constrain usable stack space), and for
+ * compiler/arch-specific stack alignment to remove the lower bits.
+ */
+#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)
+
+/*
+ * These macros must be used during syscall entry when interrupts and
+ * preempt are disabled, and after user registers have been stored to
+ * the stack.
+ */
+#define add_random_kstack_offset() do {					\
+	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
+				&randomize_kstack_offset)) {		\
+		u32 offset = raw_cpu_read(kstack_offset);		\
+		u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));	\
+		/* Keep allocation even after "ptr" loses scope. */	\
+		asm volatile("" :: "r"(ptr) : "memory");		\
+	}								\
+} while (0)
+
+#define choose_random_kstack_offset(rand) do {				\
+	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
+				&randomize_kstack_offset)) {		\
+		u32 offset = raw_cpu_read(kstack_offset);		\
+		offset ^= (rand);					\
+		raw_cpu_write(kstack_offset, offset);		\
+	}								\
+} while (0)
+
+#endif
```
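For context on how these two macros are meant to be wired up: the header's comment says they must run during syscall entry, with interrupts and preemption disabled and user registers already saved. Below is a minimal sketch of what an architecture's glue might look like. The hook names `arch_do_syscall()` and `arch_exit_to_user()` are hypothetical placeholders, and the entropy source is only illustrative; the sketch assumes the arch has opted in via Kconfig and that the static key and per-CPU variable declared in the header are defined in generic code.

```c
/*
 * Hypothetical arch glue -- a sketch of intended usage, not code from
 * this tree. arch_do_syscall()/arch_exit_to_user() are made-up names.
 */
#include <linux/randomize_kstack.h>
#include <linux/ptrace.h>
#include <linux/timex.h>	/* get_cycles() */

/* Hypothetical entry hook: interrupts/preempt off, user regs saved. */
static void arch_do_syscall(struct pt_regs *regs)
{
	/*
	 * Grow the stack by the offset chosen during a *previous*
	 * syscall, before dispatch allocates any frames on top of it.
	 */
	add_random_kstack_offset();

	/* ... look up and call the syscall handler ... */
}

/* Hypothetical exit hook, on the way back to userspace. */
static void arch_exit_to_user(void)
{
	/*
	 * XOR fresh entropy into the per-CPU offset for next time.
	 * Only the low 10 bits survive KSTACK_OFFSET_MAX(), so a
	 * cheap timestamp source is sufficient here.
	 */
	choose_random_kstack_offset(get_cycles() & 0x3FF);
}
```

Note the split between consuming the offset (`add_random_kstack_offset()` at entry) and refilling it (`choose_random_kstack_offset()` at exit): the offset applied to the current syscall was decided before that syscall began, so an attacker probing from within a syscall cannot influence the offset it runs under.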