author     David S. Miller <[email protected]>  2018-01-09 10:37:00 -0500
committer  David S. Miller <[email protected]>  2018-01-09 10:37:00 -0500
commit     a0ce093180f2bbb832b3f5583adc640ad67ea568 (patch)
tree       62c9aca5be3566ea4810e4584a02870de8b953f8 /arch/x86/include/asm/processor.h
parent     f4803f1b73f877a571be4c8e531dfcf190acc691 (diff)
parent     ef7f8cec80a0ba7bd00ece46844c8994117dc910 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'arch/x86/include/asm/processor.h')
-rw-r--r--  arch/x86/include/asm/processor.h  23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cad8dab266bc..d3a67fba200a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -852,13 +852,22 @@ static inline void spin_lock_prefetch(const void *x)
 
 #else
 /*
- * User space process size. 47bits minus one guard page. The guard
- * page is necessary on Intel CPUs: if a SYSCALL instruction is at
- * the highest possible canonical userspace address, then that
- * syscall will enter the kernel with a non-canonical return
- * address, and SYSRET will explode dangerously. We avoid this
- * particular problem by preventing anything from being mapped
- * at the maximum canonical address.
+ * User space process size. This is the first address outside the user range.
+ * There are a few constraints that determine this:
+ *
+ * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
+ * address, then that syscall will enter the kernel with a
+ * non-canonical return address, and SYSRET will explode dangerously.
+ * We avoid this particular problem by preventing anything executable
+ * from being mapped at the maximum canonical address.
+ *
+ * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
+ * CPUs malfunction if they execute code from the highest canonical page.
+ * They'll speculate right off the end of the canonical space, and
+ * bad things happen. This is worked around in the same way as the
+ * Intel problem.
+ *
+ * With page table isolation enabled, we map the LDT in ... [stay tuned]
  */
 #define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
 
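For reference, the guard-page arithmetic in the `TASK_SIZE_MAX` definition above can be checked in isolation. The short C sketch below hard-codes stand-ins for the kernel macros rather than pulling in kernel headers: `VIRTUAL_MASK_SHIFT == 47` and a 4 KiB page are assumptions matching the common x86-64 4-level-paging configuration, not the kernel's actual `__VIRTUAL_MASK_SHIFT` and `PAGE_SIZE` values on every build.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hard-coded stand-ins for the kernel macros; these match the common
 * x86-64 4-level-paging configuration and are assumptions made for
 * this example only.
 */
#define VIRTUAL_MASK_SHIFT	47
#define PAGE_SIZE_BYTES		UINT64_C(4096)

int main(void)
{
	/* Same arithmetic as the kernel's TASK_SIZE_MAX definition. */
	uint64_t task_size_max =
		(UINT64_C(1) << VIRTUAL_MASK_SHIFT) - PAGE_SIZE_BYTES;

	/* Prints 0x7ffffffff000: the first address outside user space. */
	printf("TASK_SIZE_MAX = %#" PRIx64 "\n", task_size_max);

	/*
	 * The page from 0x7ffffffff000 through 0x7fffffffffff (the
	 * highest canonical user page under 47-bit addressing) is
	 * therefore never mappable, so no SYSCALL instruction can sit
	 * at the last canonical byte and return through SYSRET with a
	 * non-canonical RIP.
	 */
	return 0;
}
```

Subtracting one page from the top of the canonical user range is exactly the guard page the comment describes: it addresses both the Intel SYSRET problem and the Ryzen highest-canonical-page erratum in one stroke.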