Diffstat (limited to 'arch/x86/mm/pti.c')
-rw-r--r--	arch/x86/mm/pti.c	33
1 file changed, 32 insertions, 1 deletion
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index c1fc1ae6b429..4fee5c3003ed 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -434,11 +434,42 @@ static void __init pti_clone_p4d(unsigned long addr)
 }
 
 /*
- * Clone the CPU_ENTRY_AREA into the user space visible page table.
+ * Clone the CPU_ENTRY_AREA and associated data into the user space visible
+ * page table.
  */
 static void __init pti_clone_user_shared(void)
 {
+	unsigned int cpu;
+
 	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * The SYSCALL64 entry code needs to be able to find the
+		 * thread stack and needs one word of scratch space in which
+		 * to spill a register.  All of this lives in the TSS, in
+		 * the sp1 and sp2 slots.
+		 *
+		 * This is done for all possible CPUs during boot to ensure
+		 * that it's propagated to all mms.  If we were to add one of
+		 * these mappings during CPU hotplug, we would need to take
+		 * some measure to make sure that every mm that subsequently
+		 * ran on that CPU would have the relevant PGD entry in its
+		 * pagetables.  The usual vmalloc_fault() mechanism would not
+		 * work for page faults taken in entry_SYSCALL_64 before RSP
+		 * is set up.
+		 */
+
+		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
+		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+		pte_t *target_pte;
+
+		target_pte = pti_user_pagetable_walk_pte(va);
+		if (WARN_ON(!target_pte))
+			return;
+
+		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
+	}
 }
 
 #else /* CONFIG_X86_64 */
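The loop body above is the whole mechanism: for each possible CPU it looks up the user-visible PTE slot for the per-CPU cpu_tss_rw page and points it at the same physical page the kernel mapping uses. A minimal sketch of that pattern, pulled out into a standalone helper for illustration only (the name pti_clone_percpu_page() is hypothetical and not part of this patch; it assumes the same boot-time context inside pti.c):

static void __init pti_clone_percpu_page(void *addr)
{
	/* Physical page backing this per-CPU allocation. */
	phys_addr_t pa = per_cpu_ptr_to_phys(addr);
	pte_t *target_pte;

	/* Walk (allocating as needed) the user page table down to the PTE. */
	target_pte = pti_user_pagetable_walk_pte((unsigned long)addr);
	if (WARN_ON(!target_pte))
		return;

	/* Alias the existing kernel page into the user-visible page table. */
	*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
}

With such a helper, the loop in pti_clone_user_shared() would reduce to for_each_possible_cpu(cpu) pti_clone_percpu_page(&per_cpu(cpu_tss_rw, cpu)); the patch inlines the same steps directly in the loop instead.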