Diffstat (limited to 'arch/x86/kernel/kvmclock.c')
-rw-r--r--	arch/x86/kernel/kvmclock.c	263
1 file changed, 105 insertions(+), 158 deletions(-)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index bf8d1eb7fca3..1e6764648af3 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -23,30 +23,57 @@
 #include <asm/apic.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/memblock.h>
+#include <linux/cpuhotplug.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
 
+#include <asm/hypervisor.h>
 #include <asm/mem_encrypt.h>
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
 #include <asm/kvmclock.h>
 
-static int kvmclock __ro_after_init = 1;
-static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static u64 kvm_sched_clock_offset;
+static int kvmclock __initdata = 1;
+static int kvmclock_vsyscall __initdata = 1;
+static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static u64 kvm_sched_clock_offset __ro_after_init;
 
-static int parse_no_kvmclock(char *arg)
+static int __init parse_no_kvmclock(char *arg)
 {
 	kvmclock = 0;
 	return 0;
 }
 early_param("no-kvmclock", parse_no_kvmclock);
 
-/* The hypervisor will put information about time periodically here */
-static struct pvclock_vsyscall_time_info *hv_clock;
-static struct pvclock_wall_clock *wall_clock;
+static int __init parse_no_kvmclock_vsyscall(char *arg)
+{
+	kvmclock_vsyscall = 0;
+	return 0;
+}
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
+/* Aligned to page sizes to match whats mapped via vsyscalls to userspace */
+#define HV_CLOCK_SIZE	(sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS)
+#define HVC_BOOT_ARRAY_SIZE \
+	(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
+
+static struct pvclock_vsyscall_time_info
+			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock;
+static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
@@ -55,21 +82,10 @@ static struct pvclock_wall_clock *wall_clock;
  */
 static void kvm_get_wallclock(struct timespec64 *now)
 {
-	struct pvclock_vcpu_time_info *vcpu_time;
-	int low, high;
-	int cpu;
-
-	low = (int)slow_virt_to_phys(wall_clock);
-	high = ((u64)slow_virt_to_phys(wall_clock) >> 32);
-
-	native_write_msr(msr_kvm_wall_clock, low, high);
-
-	cpu = get_cpu();
-
-	vcpu_time = &hv_clock[cpu].pvti;
-	pvclock_read_wallclock(wall_clock, vcpu_time, now);
-
-	put_cpu();
+	wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
+	preempt_disable();
+	pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
+	preempt_enable();
 }
 
 static int kvm_set_wallclock(const struct timespec64 *now)
@@ -79,14 +95,10 @@ static int kvm_set_wallclock(const struct timespec64 *now)
 
 static u64 kvm_clock_read(void)
 {
-	struct pvclock_vcpu_time_info *src;
 	u64 ret;
-	int cpu;
 
 	preempt_disable_notrace();
-	cpu = smp_processor_id();
-	src = &hv_clock[cpu].pvti;
-	ret = pvclock_clocksource_read(src);
+	ret = pvclock_clocksource_read(this_cpu_pvti());
 	preempt_enable_notrace();
 	return ret;
 }
@@ -112,11 +124,11 @@ static inline void kvm_sched_clock_init(bool stable)
 	kvm_sched_clock_offset = kvm_clock_read();
 	pv_time_ops.sched_clock = kvm_sched_clock_read;
 
-	printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
-			kvm_sched_clock_offset);
+	pr_info("kvm-clock: using sched offset of %llu cycles",
+		kvm_sched_clock_offset);
 
 	BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
-	         sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
+		sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
 }
 
 /*
@@ -130,18 +142,11 @@ static inline void kvm_sched_clock_init(bool stable)
  */
 static unsigned long kvm_get_tsc_khz(void)
 {
-	struct pvclock_vcpu_time_info *src;
-	int cpu;
-	unsigned long tsc_khz;
-
-	cpu = get_cpu();
-	src = &hv_clock[cpu].pvti;
-	tsc_khz = pvclock_tsc_khz(src);
-	put_cpu();
-	return tsc_khz;
+	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+	return pvclock_tsc_khz(this_cpu_pvti());
 }
 
-static void kvm_get_preset_lpj(void)
+static void __init kvm_get_preset_lpj(void)
 {
 	unsigned long khz;
 	u64 lpj;
@@ -155,49 +160,40 @@ static void kvm_get_preset_lpj(void)
 
 bool kvm_check_and_clear_guest_paused(void)
 {
+	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
 	bool ret = false;
-	struct pvclock_vcpu_time_info *src;
-	int cpu = smp_processor_id();
 
-	if (!hv_clock)
+	if (!src)
 		return ret;
 
-	src = &hv_clock[cpu].pvti;
-	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
-		src->flags &= ~PVCLOCK_GUEST_STOPPED;
+	if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
+		src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
 		pvclock_touch_watchdogs();
 		ret = true;
 	}
-
 	return ret;
 }
 
 struct clocksource kvm_clock = {
-	.name = "kvm-clock",
-	.read = kvm_clock_get_cycles,
-	.rating = 400,
-	.mask = CLOCKSOURCE_MASK(64),
-	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	.name	= "kvm-clock",
+	.read	= kvm_clock_get_cycles,
+	.rating	= 400,
+	.mask	= CLOCKSOURCE_MASK(64),
+	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 EXPORT_SYMBOL_GPL(kvm_clock);
 
-int kvm_register_clock(char *txt)
+static void kvm_register_clock(char *txt)
 {
-	int cpu = smp_processor_id();
-	int low, high, ret;
-	struct pvclock_vcpu_time_info *src;
-
-	if (!hv_clock)
-		return 0;
+	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
+	u64 pa;
 
-	src = &hv_clock[cpu].pvti;
-	low = (int)slow_virt_to_phys(src) | 1;
-	high = ((u64)slow_virt_to_phys(src) >> 32);
-	ret = native_write_msr_safe(msr_kvm_system_time, low, high);
-	printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
-	       cpu, high, low, txt);
+	if (!src)
+		return;
 
-	return ret;
+	pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
+	wrmsrl(msr_kvm_system_time, pa);
+	pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
 }
 
 static void kvm_save_sched_clock_state(void)
@@ -212,11 +208,7 @@ static void kvm_restore_sched_clock_state(void)
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static void kvm_setup_secondary_clock(void)
 {
-	/*
-	 * Now that the first cpu already had this clocksource initialized,
-	 * we shouldn't fail.
-	 */
-	WARN_ON(kvm_register_clock("secondary cpu clock"));
+	kvm_register_clock("secondary cpu clock");
 }
 #endif
 
@@ -244,98 +236,84 @@ static void kvm_shutdown(void)
 	native_machine_shutdown();
 }
 
-static phys_addr_t __init kvm_memblock_alloc(phys_addr_t size,
-					     phys_addr_t align)
+static int __init kvm_setup_vsyscall_timeinfo(void)
 {
-	phys_addr_t mem;
+#ifdef CONFIG_X86_64
+	u8 flags;
 
-	mem = memblock_alloc(size, align);
-	if (!mem)
+	if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
 		return 0;
 
-	if (sev_active()) {
-		if (early_set_memory_decrypted((unsigned long)__va(mem), size))
-			goto e_free;
-	}
+	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+		return 0;
 
-	return mem;
-e_free:
-	memblock_free(mem, size);
+	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
 	return 0;
 }
+early_initcall(kvm_setup_vsyscall_timeinfo);
 
-static void __init kvm_memblock_free(phys_addr_t addr, phys_addr_t size)
+static int kvmclock_setup_percpu(unsigned int cpu)
 {
-	if (sev_active())
-		early_set_memory_encrypted((unsigned long)__va(addr), size);
+	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
+
+	/*
+	 * The per cpu area setup replicates CPU0 data to all cpu
+	 * pointers. So carefully check. CPU0 has been set up in init
+	 * already.
+	 */
+	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
+		return 0;
+
+	/* Use the static page for the first CPUs, allocate otherwise */
+	if (cpu < HVC_BOOT_ARRAY_SIZE)
+		p = &hv_clock_boot[cpu];
+	else
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
 
-	memblock_free(addr, size);
+	per_cpu(hv_clock_per_cpu, cpu) = p;
+	return p ? 0 : -ENOMEM;
 }
 
 void __init kvmclock_init(void)
 {
-	struct pvclock_vcpu_time_info *vcpu_time;
-	unsigned long mem, mem_wall_clock;
-	int size, cpu, wall_clock_size;
 	u8 flags;
 
-	size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
-	if (!kvm_para_available())
+	if (!kvm_para_available() || !kvmclock)
 		return;
 
-	if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
 		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
 		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
-	} else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
-		return;
-
-	wall_clock_size = PAGE_ALIGN(sizeof(struct pvclock_wall_clock));
-	mem_wall_clock = kvm_memblock_alloc(wall_clock_size, PAGE_SIZE);
-	if (!mem_wall_clock)
-		return;
-
-	wall_clock = __va(mem_wall_clock);
-	memset(wall_clock, 0, wall_clock_size);
-
-	mem = kvm_memblock_alloc(size, PAGE_SIZE);
-	if (!mem) {
-		kvm_memblock_free(mem_wall_clock, wall_clock_size);
-		wall_clock = NULL;
+	} else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
 		return;
 	}
 
-	hv_clock = __va(mem);
-	memset(hv_clock, 0, size);
-
-	if (kvm_register_clock("primary cpu clock")) {
-		hv_clock = NULL;
-		kvm_memblock_free(mem, size);
-		kvm_memblock_free(mem_wall_clock, wall_clock_size);
-		wall_clock = NULL;
+	if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
			      kvmclock_setup_percpu, NULL) < 0) {
 		return;
 	}
 
-	printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+	pr_info("kvm-clock: Using msrs %x and %x",
 		msr_kvm_system_time, msr_kvm_wall_clock);
 
+	this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
+	kvm_register_clock("primary cpu clock");
+	pvclock_set_pvti_cpu0_va(hv_clock_boot);
+
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
 		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 
-	cpu = get_cpu();
-	vcpu_time = &hv_clock[cpu].pvti;
-	flags = pvclock_read_flags(vcpu_time);
-
+	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
 	kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
-	put_cpu();
 
 	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
 	x86_platform.calibrate_cpu = kvm_get_tsc_khz;
 	x86_platform.get_wallclock = kvm_get_wallclock;
 	x86_platform.set_wallclock = kvm_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
-	x86_cpuinit.early_percpu_clock_init =
-		kvm_setup_secondary_clock;
+	x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
 #endif
 	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
 	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
@@ -347,34 +325,3 @@ void __init kvmclock_init(void)
 	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
 	pv_info.name = "KVM";
 }
-
-int __init kvm_setup_vsyscall_timeinfo(void)
-{
-#ifdef CONFIG_X86_64
-	int cpu;
-	u8 flags;
-	struct pvclock_vcpu_time_info *vcpu_time;
-	unsigned int size;
-
-	if (!hv_clock)
-		return 0;
-
-	size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
-	cpu = get_cpu();
-
-	vcpu_time = &hv_clock[cpu].pvti;
-	flags = pvclock_read_flags(vcpu_time);
-
-	if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
-		put_cpu();
-		return 1;
-	}
-
-	pvclock_set_pvti_cpu0_va(hv_clock);
-	put_cpu();
-
-	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
-#endif
-	return 0;
-}
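A note on the allocation scheme above (illustration, not part of the commit): kvmclock_setup_percpu() gives the first HVC_BOOT_ARRAY_SIZE CPUs, one page worth of pvclock entries, slots in the static page-aligned hv_clock_boot[] array, and falls back to kzalloc() only for CPUs beyond that, so small guests never touch the allocator while large guests still work. A minimal userspace sketch of that policy; the names (struct pvti, setup_percpu, per_cpu_ptr, boot_array) and the 64-byte size are stand-ins, not taken from the kernel headers:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct pvclock_vsyscall_time_info (64 bytes upstream). */
struct pvti { unsigned char pad[64]; };

#define PAGE_SIZE	4096
#define NR_CPUS		128
#define BOOT_ARRAY_SIZE	(PAGE_SIZE / sizeof(struct pvti))	/* 64 slots */

static struct pvti boot_array[BOOT_ARRAY_SIZE];	/* models hv_clock_boot[] */
static struct pvti *per_cpu_ptr[NR_CPUS];	/* models hv_clock_per_cpu */

/* Models kvmclock_setup_percpu(): static slot first, heap beyond that. */
static int setup_percpu(unsigned int cpu)
{
	if (per_cpu_ptr[cpu])
		return 0;			/* already initialized */

	if (cpu < BOOT_ARRAY_SIZE)
		per_cpu_ptr[cpu] = &boot_array[cpu];
	else
		per_cpu_ptr[cpu] = calloc(1, sizeof(struct pvti));

	return per_cpu_ptr[cpu] ? 0 : -1;	/* kernel returns -ENOMEM */
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (setup_percpu(cpu))
			return 1;
		if (cpu == 63 || cpu == 64)
			printf("cpu %u -> %s\n", cpu,
			       cpu < BOOT_ARRAY_SIZE ? "static page" : "kzalloc");
	}
	return 0;
}

The real callback runs at the CPUHP_BP_PREPARE_DYN hotplug stage, i.e. on the control CPU before the new CPU starts, which is why a sleeping GFP_KERNEL allocation is safe there.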
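The per-CPU pointer feeds pvclock_clocksource_read() via this_cpu_pvti(). That reader lives in arch/x86/kernel/pvclock.c, not in this diff; its general shape is a version-stamped retry loop (an odd version means the hypervisor is mid-update). A simplified, compilable model, with a hypothetical tsc() callback and the struct trimmed to the fields used (layout and padding are not reproduced):

#include <stdio.h>

/* Simplified model of struct pvclock_vcpu_time_info. */
struct pvti_model {
	unsigned int version;		/* odd while the hypervisor updates */
	unsigned long long tsc_timestamp;
	unsigned long long system_time;	/* ns at tsc_timestamp */
	unsigned int tsc_to_system_mul;	/* 32.32 fixed point */
	signed char tsc_shift;
};

static unsigned long long pvclock_read_model(volatile struct pvti_model *p,
					     unsigned long long (*tsc)(void))
{
	unsigned int version;
	unsigned long long delta, ns;

	do {
		version = p->version;
		/* the kernel inserts read barriers around this block */
		delta = tsc() - p->tsc_timestamp;
		if (p->tsc_shift >= 0)
			delta <<= p->tsc_shift;
		else
			delta >>= -p->tsc_shift;
		/* the kernel uses a 128-bit intermediate for this multiply */
		ns = p->system_time +
		     (unsigned long long)(((unsigned __int128)delta *
					   p->tsc_to_system_mul) >> 32);
	} while ((version & 1) || version != p->version);

	return ns;
}

static unsigned long long fake_tsc(void) { return 1000000ULL; }

int main(void)
{
	struct pvti_model m = {
		.version = 2,			/* even => stable snapshot */
		.tsc_to_system_mul = 1U << 31,	/* x0.5 in 32.32 fixed point */
		.tsc_shift = 0,
	};

	/* 1,000,000 cycles * 0.5 => 500000 ns */
	printf("%llu ns\n", pvclock_read_model(&m, fake_tsc));
	return 0;
}

Because each CPU only ever reads its own pvti entry, the this_cpu_pvti() accessor plus a short preempt-disabled section replaces all of the get_cpu()/hv_clock[cpu] indexing the old code carried around.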