Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--   arch/x86/kernel/cpu/common.c   128
1 file changed, 72 insertions, 56 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80710a68ef7d..8075eb45a3a4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,12 +18,16 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <linux/io.h>
 #include <linux/syscore_ops.h>
 #include <linux/pgtable.h>
 #include <linux/stackprotector.h>
+#include <linux/utsname.h>
 
+#include <asm/alternative.h>
 #include <asm/cmdline.h>
 #include <asm/perf_event.h>
 #include <asm/mmu_context.h>
@@ -59,7 +63,7 @@
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
 #include <asm/uv/uv.h>
-#include <asm/sigframe.h>
+#include <asm/set_memory.h>
 #include <asm/traps.h>
 #include <asm/sev.h>
 
@@ -67,14 +71,6 @@
 
 u32 elf_hwcap2 __read_mostly;
 
-/* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_initialized_mask;
-cpumask_var_t cpu_callout_mask;
-cpumask_var_t cpu_callin_mask;
-
-/* representing cpus for which sibling maps can be computed */
-cpumask_var_t cpu_sibling_setup_mask;
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
@@ -169,15 +165,6 @@ clear_ppin:
 	clear_cpu_cap(c, info->feature);
 }
 
-/* correctly size the local cpu masks */
-void __init setup_cpu_local_masks(void)
-{
-	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-	alloc_bootmem_cpumask_var(&cpu_callin_mask);
-	alloc_bootmem_cpumask_var(&cpu_callout_mask);
-	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
-
 static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
@@ -1600,10 +1587,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 	sld_setup(c);
 
-	fpu__init_system(c);
-
-	init_sigframe_size();
-
 #ifdef CONFIG_X86_32
 	/*
 	 * Regardless of whether PCID is enumerated, the SDM says
@@ -2123,19 +2106,6 @@ static void dbg_restore_debug_regs(void)
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */
 
-static void wait_for_master_cpu(int cpu)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * wait for ACK from master CPU before continuing
-	 * with AP initialization
-	 */
-	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
-	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
-		cpu_relax();
-#endif
-}
-
 static inline void setup_getcpu(int cpu)
 {
 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
@@ -2158,11 +2128,7 @@ static inline void setup_getcpu(int cpu)
 }
 
 #ifdef CONFIG_X86_64
-static inline void ucode_cpu_init(int cpu)
-{
-	if (cpu)
-		load_ucode_ap();
-}
+static inline void ucode_cpu_init(int cpu) { }
 
 static inline void tss_setup_ist(struct tss_struct *tss)
 {
@@ -2239,8 +2205,6 @@ void cpu_init(void)
 	struct task_struct *cur = current;
 	int cpu = raw_smp_processor_id();
 
-	wait_for_master_cpu(cpu);
-
 	ucode_cpu_init(cpu);
 
 #ifdef CONFIG_NUMA
@@ -2285,26 +2249,12 @@ void cpu_init(void)
 
 	doublefault_init_cpu_tss();
 
-	fpu__init_cpu();
-
 	if (is_uv_system())
 		uv_cpu_init();
 
 	load_fixmap_gdt(cpu);
 }
 
-#ifdef CONFIG_SMP
-void cpu_init_secondary(void)
-{
-	/*
-	 * Relies on the BP having set-up the IDT tables, which are loaded
-	 * on this CPU in cpu_init_exception_handling().
-	 */
-	cpu_init_exception_handling();
-	cpu_init();
-}
-#endif
-
 #ifdef CONFIG_MICROCODE_LATE_LOADING
 /**
  * store_cpu_caps() - Store a snapshot of CPU capabilities
@@ -2362,3 +2312,69 @@ void arch_smt_update(void)
 	/* Check whether IPI broadcasting can be enabled */
 	apic_smt_update();
 }
+
+void __init arch_cpu_finalize_init(void)
+{
+	identify_boot_cpu();
+
+	/*
+	 * identify_boot_cpu() initialized SMT support information, let the
+	 * core code know.
+	 */
+	cpu_smt_check_topology();
+
+	if (!IS_ENABLED(CONFIG_SMP)) {
+		pr_info("CPU: ");
+		print_cpu_info(&boot_cpu_data);
+	}
+
+	cpu_select_mitigations();
+
+	arch_smt_update();
+
+	if (IS_ENABLED(CONFIG_X86_32)) {
+		/*
+		 * Check whether this is a real i386 which is not longer
+		 * supported and fixup the utsname.
+		 */
+		if (boot_cpu_data.x86 < 4)
+			panic("Kernel requires i486+ for 'invlpg' and other features");
+
+		init_utsname()->machine[1] =
+			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+	}
+
+	/*
+	 * Must be before alternatives because it might set or clear
+	 * feature bits.
+	 */
+	fpu__init_system();
+	fpu__init_cpu();
+
+	alternative_instructions();
+
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		/*
+		 * Make sure the first 2MB area is not mapped by huge pages
+		 * There are typically fixed size MTRRs in there and overlapping
+		 * MTRRs into large pages causes slow downs.
+		 *
+		 * Right now we don't do that with gbpages because there seems
+		 * very little benefit for that case.
+		 */
+		if (!direct_gbpages)
+			set_memory_4k((unsigned long)__va(0), 1);
+	} else {
+		fpu__init_check_bugs();
+	}
+
+	/*
+	 * This needs to be called before any devices perform DMA
+	 * operations that might use the SWIOTLB bounce buffers. It will
+	 * mark the bounce buffers as decrypted so that their usage will
+	 * not cause "plain-text" data to be decrypted when accessed. It
+	 * must be called after late_time_init() so that Hyper-V x86/x64
+	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+	 */
+	mem_encrypt_init();
+}