Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	128
1 file changed, 63 insertions(+), 65 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 34310a8d24b9..0d06b7b63e95 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -16,7 +16,7 @@
  *
  */
 
-#include "iodev.h"
+#include <kvm/iodev.h>
 
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
@@ -66,13 +66,13 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
-unsigned int halt_poll_ns = 0;
+static unsigned int halt_poll_ns;
 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
 
 /*
  * Ordering of locks:
  *
- * 		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
+ *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
 DEFINE_SPINLOCK(kvm_lock);
@@ -80,7 +80,7 @@ static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;
-static int kvm_usage_count = 0;
+static int kvm_usage_count;
 static atomic_t hardware_enable_failed;
 
 struct kmem_cache *kvm_vcpu_cache;
@@ -539,20 +539,12 @@ void *kvm_kvzalloc(unsigned long size)
 		return kzalloc(size, GFP_KERNEL);
 }
 
-void kvm_kvfree(const void *addr)
-{
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
-}
-
 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
 	if (!memslot->dirty_bitmap)
 		return;
 
-	kvm_kvfree(memslot->dirty_bitmap);
+	kvfree(memslot->dirty_bitmap);
 	memslot->dirty_bitmap = NULL;
 }
 
@@ -888,8 +880,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		 * or moved, memslot will be created.
 		 *
 		 * validation of sp->gfn happens in:
-		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
-		 * 	- kvm_is_visible_gfn (mmu_check_roots)
+		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+		 *	- kvm_is_visible_gfn (mmu_check_roots)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
 
@@ -1061,9 +1053,11 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 		mask = xchg(&dirty_bitmap[i], 0);
 		dirty_bitmap_buffer[i] = mask;
 
-		offset = i * BITS_PER_LONG;
-		kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset,
-								mask);
+		if (mask) {
+			offset = i * BITS_PER_LONG;
+			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
+								offset, mask);
+		}
 	}
 
 	spin_unlock(&kvm->mmu_lock);
@@ -1481,7 +1475,6 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
 	return kvm_pfn_to_page(pfn);
 }
-
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
@@ -1517,6 +1510,7 @@ void kvm_set_pfn_dirty(pfn_t pfn)
 {
 	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
+
 		if (!PageReserved(page))
 			SetPageDirty(page);
 	}
@@ -1742,7 +1736,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 	int offset = offset_in_page(gpa);
 	int ret;
 
-        while ((seg = next_segment(len, offset)) != 0) {
+	while ((seg = next_segment(len, offset)) != 0) {
 		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
 		if (ret < 0)
 			return ret;
@@ -1800,6 +1794,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	start = cur = ktime_get();
 	if (halt_poll_ns) {
 		ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+
 		do {
 			/*
 			 * This sets KVM_REQ_UNHALT if an interrupt
@@ -2135,6 +2130,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 			/* The thread running this VCPU changed. */
 			struct pid *oldpid = vcpu->pid;
 			struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+
 			rcu_assign_pointer(vcpu->pid, newpid);
 			if (oldpid)
 				synchronize_rcu();
@@ -2205,7 +2201,7 @@ out_free1:
 		if (r)
 			goto out;
 		r = -EFAULT;
-		if (copy_to_user(argp, &mp_state, sizeof mp_state))
+		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
 			goto out;
 		r = 0;
 		break;
@@ -2214,7 +2210,7 @@ out_free1:
 		struct kvm_mp_state mp_state;
 
 		r = -EFAULT;
-		if (copy_from_user(&mp_state, argp, sizeof mp_state))
+		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
 			goto out;
 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
 		break;
@@ -2223,13 +2219,13 @@ out_free1:
 		struct kvm_translation tr;
 
 		r = -EFAULT;
-		if (copy_from_user(&tr, argp, sizeof tr))
+		if (copy_from_user(&tr, argp, sizeof(tr)))
 			goto out;
 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
 		if (r)
 			goto out;
 		r = -EFAULT;
-		if (copy_to_user(argp, &tr, sizeof tr))
+		if (copy_to_user(argp, &tr, sizeof(tr)))
 			goto out;
 		r = 0;
 		break;
@@ -2238,7 +2234,7 @@ out_free1:
 		struct kvm_guest_debug dbg;
 
 		r = -EFAULT;
-		if (copy_from_user(&dbg, argp, sizeof dbg))
+		if (copy_from_user(&dbg, argp, sizeof(dbg)))
 			goto out;
 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
 		break;
@@ -2252,14 +2248,14 @@ out_free1:
 		if (argp) {
 			r = -EFAULT;
 			if (copy_from_user(&kvm_sigmask, argp,
-					   sizeof kvm_sigmask))
+					   sizeof(kvm_sigmask)))
 				goto out;
 			r = -EINVAL;
-			if (kvm_sigmask.len != sizeof sigset)
+			if (kvm_sigmask.len != sizeof(sigset))
 				goto out;
 			r = -EFAULT;
 			if (copy_from_user(&sigset, sigmask_arg->sigset,
-					   sizeof sigset))
+					   sizeof(sigset)))
 				goto out;
 			p = &sigset;
 		}
@@ -2321,14 +2317,14 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
 		if (argp) {
 			r = -EFAULT;
 			if (copy_from_user(&kvm_sigmask, argp,
-					   sizeof kvm_sigmask))
+					   sizeof(kvm_sigmask)))
 				goto out;
 			r = -EINVAL;
-			if (kvm_sigmask.len != sizeof csigset)
+			if (kvm_sigmask.len != sizeof(csigset))
 				goto out;
 			r = -EFAULT;
 			if (copy_from_user(&csigset, sigmask_arg->sigset,
-					   sizeof csigset))
+					   sizeof(csigset)))
 				goto out;
 			sigset_from_compat(&sigset, &csigset);
 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
@@ -2524,7 +2520,7 @@ static long kvm_vm_ioctl(struct file *filp,
 
 		r = -EFAULT;
 		if (copy_from_user(&kvm_userspace_mem, argp,
-						sizeof kvm_userspace_mem))
+						sizeof(kvm_userspace_mem)))
 			goto out;
 
 		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
@@ -2534,7 +2530,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		struct kvm_dirty_log log;
 
 		r = -EFAULT;
-		if (copy_from_user(&log, argp, sizeof log))
+		if (copy_from_user(&log, argp, sizeof(log)))
 			goto out;
 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
 		break;
@@ -2542,16 +2538,18 @@
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	case KVM_REGISTER_COALESCED_MMIO: {
 		struct kvm_coalesced_mmio_zone zone;
+
 		r = -EFAULT;
-		if (copy_from_user(&zone, argp, sizeof zone))
+		if (copy_from_user(&zone, argp, sizeof(zone)))
 			goto out;
 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
 		break;
 	}
 	case KVM_UNREGISTER_COALESCED_MMIO: {
 		struct kvm_coalesced_mmio_zone zone;
+
 		r = -EFAULT;
-		if (copy_from_user(&zone, argp, sizeof zone))
+		if (copy_from_user(&zone, argp, sizeof(zone)))
 			goto out;
 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
 		break;
@@ -2561,7 +2559,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		struct kvm_irqfd data;
 
 		r = -EFAULT;
-		if (copy_from_user(&data, argp, sizeof data))
+		if (copy_from_user(&data, argp, sizeof(data)))
 			goto out;
 		r = kvm_irqfd(kvm, &data);
 		break;
@@ -2570,7 +2568,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		struct kvm_ioeventfd data;
 
 		r = -EFAULT;
-		if (copy_from_user(&data, argp, sizeof data))
+		if (copy_from_user(&data, argp, sizeof(data)))
 			goto out;
 		r = kvm_ioeventfd(kvm, &data);
 		break;
@@ -2591,7 +2589,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		struct kvm_msi msi;
 
 		r = -EFAULT;
-		if (copy_from_user(&msi, argp, sizeof msi))
+		if (copy_from_user(&msi, argp, sizeof(msi)))
 			goto out;
 		r = kvm_send_userspace_msi(kvm, &msi);
 		break;
@@ -2603,7 +2601,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		struct kvm_irq_level irq_event;
 
 		r = -EFAULT;
-		if (copy_from_user(&irq_event, argp, sizeof irq_event))
+		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
 			goto out;
 
 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
@@ -2613,7 +2611,7 @@ static long kvm_vm_ioctl(struct file *filp,
 
 		r = -EFAULT;
 		if (ioctl == KVM_IRQ_LINE_STATUS) {
-			if (copy_to_user(argp, &irq_event, sizeof irq_event))
+			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
 				goto out;
 		}
 
@@ -2646,7 +2644,7 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out_free_irq_routing;
 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
 					routing.flags);
-	out_free_irq_routing:
+out_free_irq_routing:
 		vfree(entries);
 		break;
 	}
@@ -2821,8 +2819,7 @@ static void hardware_enable_nolock(void *junk)
 
 	if (r) {
 		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
 		atomic_inc(&hardware_enable_failed);
-		printk(KERN_INFO "kvm: enabling virtualization on "
-				 "CPU%d failed\n", cpu);
+		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
 	}
 }
@@ -2898,12 +2895,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 	val &= ~CPU_TASKS_FROZEN;
 	switch (val) {
 	case CPU_DYING:
-		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
+		pr_info("kvm: disabling virtualization on CPU%d\n",
 		       cpu);
 		hardware_disable();
 		break;
 	case CPU_STARTING:
-		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
+		pr_info("kvm: enabling virtualization on CPU%d\n",
 		       cpu);
 		hardware_enable();
 		break;
@@ -2920,7 +2917,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 	 *
 	 * And Intel TXT required VMX off for all cpu when system shutdown.
 	 */
-	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
+	pr_info("kvm: exiting hardware virtualization\n");
 	kvm_rebooting = true;
 	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	return NOTIFY_OK;
@@ -2944,7 +2941,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 }
 
 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
-                                 const struct kvm_io_range *r2)
+				 const struct kvm_io_range *r2)
 {
 	if (r1->addr < r2->addr)
 		return -1;
@@ -2997,7 +2994,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
 	return off;
 }
 
-static int __kvm_io_bus_write(struct kvm_io_bus *bus,
+static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
 			      struct kvm_io_range *range, const void *val)
 {
 	int idx;
@@ -3008,7 +3005,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
 
 	while (idx < bus->dev_count &&
 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
-		if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
+		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
 					range->len, val))
 			return idx;
 		idx++;
@@ -3018,7 +3015,7 @@
 }
 
 /* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 		     int len, const void *val)
 {
 	struct kvm_io_bus *bus;
@@ -3030,14 +3027,14 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		.len = len,
 	};
 
-	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	r = __kvm_io_bus_write(bus, &range, val);
+	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	r = __kvm_io_bus_write(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
 
 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
-int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
-			    int len, const void *val, long cookie)
+int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
+			    gpa_t addr, int len, const void *val, long cookie)
 {
 	struct kvm_io_bus *bus;
 	struct kvm_io_range range;
@@ -3047,12 +3044,12 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		.len = len,
 	};
 
-	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
-		if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
+		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
 					val))
 			return cookie;
 
@@ -3060,11 +3057,11 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	 * cookie contained garbage; fall back to search and return the
 	 * correct cookie value.
 	 */
-	return __kvm_io_bus_write(bus, &range, val);
+	return __kvm_io_bus_write(vcpu, bus, &range, val);
 }
 
-static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
-			     void *val)
+static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
+			     struct kvm_io_range *range, void *val)
 {
 	int idx;
 
@@ -3074,7 +3071,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
 
 	while (idx < bus->dev_count &&
 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
-		if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
+		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
 				       range->len, val))
 			return idx;
 		idx++;
@@ -3085,7 +3082,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
 
 /* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 		    int len, void *val)
 {
 	struct kvm_io_bus *bus;
@@ -3097,8 +3094,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		.len = len,
 	};
 
-	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	r = __kvm_io_bus_read(bus, &range, val);
+	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	r = __kvm_io_bus_read(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
 
@@ -3268,6 +3265,7 @@ struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
 
 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
+
 	if (vcpu->preempted)
 		vcpu->preempted = false;
@@ -3349,7 +3347,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
 	r = misc_register(&kvm_dev);
 	if (r) {
-		printk(KERN_ERR "kvm: misc device register failed\n");
+		pr_err("kvm: misc device register failed\n");
 		goto out_unreg;
 	}
 
@@ -3360,7 +3358,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
 	r = kvm_init_debug();
 	if (r) {
-		printk(KERN_ERR "kvm: create debugfs files failed\n");
+		pr_err("kvm: create debugfs files failed\n");
 		goto out_undebugfs;
 	}
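For readers skimming the hunks above, the most invasive change is to the kvm_io_bus_* API: kvm_io_bus_read(), kvm_io_bus_write() and kvm_io_bus_write_cookie() now take the issuing struct kvm_vcpu instead of a struct kvm, and reach the VM through vcpu->kvm internally. A minimal sketch of what that means for a caller follows; my_handle_mmio_write() and its arguments are hypothetical (the real callers live in the per-architecture code), and the snippet assumes the usual <linux/kvm_host.h> context.

/* Hypothetical caller, illustrating only the signature change. */
static int my_handle_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				const void *data, int len)
{
	/*
	 * Before this diff the VM pointer was passed directly:
	 *	kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, gpa, len, data);
	 * Now the vcpu itself is passed, so devices registered on the
	 * bus can see which vcpu performed the access.
	 */
	return kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, len, data);
}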