Diffstat (limited to 'virt/kvm/kvm_main.c')
 virt/kvm/kvm_main.c | 106 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 75 insertions(+), 31 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5e709ebb7c40..2e93630b4add 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
+void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+{
+	make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	struct page *page;
@@ -714,6 +719,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 }
 
 /*
+ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
+ * - create a new memory slot
+ * - delete an existing memory slot
+ * - modify an existing memory slot
+ *   -- move it in the guest physical memory space
+ *   -- just change its flags
+ *
+ * Since flags can be changed by some of these operations, the following
+ * differentiation is the best we can do for __kvm_set_memory_region():
+ */
+enum kvm_mr_change {
+	KVM_MR_CREATE,
+	KVM_MR_DELETE,
+	KVM_MR_MOVE,
+	KVM_MR_FLAGS_ONLY,
+};
+
+/*
  * Allocate some memory and give it an address in the guest physical address
  * space.
  *
@@ -731,6 +754,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *slot;
 	struct kvm_memory_slot old, new;
 	struct kvm_memslots *slots = NULL, *old_memslots;
+	enum kvm_mr_change change;
 
 	r = check_memory_region_flags(mem);
 	if (r)
@@ -772,17 +796,31 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new.npages = npages;
 	new.flags = mem->flags;
 
-	/*
-	 * Disallow changing a memory slot's size or changing anything about
-	 * zero sized slots that doesn't involve making them non-zero.
-	 */
 	r = -EINVAL;
-	if (npages && old.npages && npages != old.npages)
-		goto out;
-	if (!npages && !old.npages)
+	if (npages) {
+		if (!old.npages)
+			change = KVM_MR_CREATE;
+		else { /* Modify an existing slot. */
+			if ((mem->userspace_addr != old.userspace_addr) ||
+			    (npages != old.npages) ||
+			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
+				goto out;
+
+			if (base_gfn != old.base_gfn)
+				change = KVM_MR_MOVE;
+			else if (new.flags != old.flags)
+				change = KVM_MR_FLAGS_ONLY;
+			else { /* Nothing to change. */
+				r = 0;
+				goto out;
+			}
+		}
+	} else if (old.npages) {
+		change = KVM_MR_DELETE;
+	} else /* Modify a non-existent slot: disallowed. */
 		goto out;
 
-	if ((npages && !old.npages) || (base_gfn != old.base_gfn)) {
+	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		/* Check for overlaps */
 		r = -EEXIST;
 		kvm_for_each_memslot(slot, kvm->memslots) {
@@ -800,20 +838,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		new.dirty_bitmap = NULL;
 
 	r = -ENOMEM;
-
-	/*
-	 * Allocate if a slot is being created.  If modifying a slot,
-	 * the userspace_addr cannot change.
-	 */
-	if (!old.npages) {
+	if (change == KVM_MR_CREATE) {
 		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 
 		if (kvm_arch_create_memslot(&new, npages))
 			goto out_free;
-	} else if (npages && mem->userspace_addr != old.userspace_addr) {
-		r = -EINVAL;
-		goto out_free;
 	}
 
 	/* Allocate page dirty bitmap if needed */
@@ -822,7 +852,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			goto out_free;
 	}
 
-	if (!npages || base_gfn != old.base_gfn) {
+	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
 		r = -ENOMEM;
 		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 				GFP_KERNEL);
@@ -863,15 +893,23 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			goto out_free;
 	}
 
-	/* map new memory slot into the iommu */
-	if (npages) {
+	/*
+	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
+	 * un-mapped and re-mapped if their base changes.  Since base change
+	 * unmapping is handled above with slot deletion, mapping alone is
+	 * needed here.  Anything else the iommu might care about for existing
+	 * slots (size changes, userspace addr changes and read-only flag
+	 * changes) is disallowed above, so any other attribute changes getting
+	 * here can be skipped.
+	 */
+	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_slots;
 	}
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
-	if (!npages) {
+	if (change == KVM_MR_DELETE) {
 		new.dirty_bitmap = NULL;
 		memset(&new.arch, 0, sizeof(new.arch));
 	}
@@ -1669,6 +1707,7 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
 	struct pid *pid;
 	struct task_struct *task = NULL;
+	bool ret = false;
 
 	rcu_read_lock();
 	pid = rcu_dereference(target->pid);
@@ -1676,17 +1715,15 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 		task = get_pid_task(target->pid, PIDTYPE_PID);
 	rcu_read_unlock();
 	if (!task)
-		return false;
+		return ret;
 	if (task->flags & PF_VCPU) {
 		put_task_struct(task);
-		return false;
-	}
-	if (yield_to(task, 1)) {
-		put_task_struct(task);
-		return true;
+		return ret;
 	}
+	ret = yield_to(task, 1);
 	put_task_struct(task);
-	return false;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
@@ -1727,12 +1764,14 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 	return eligible;
 }
 #endif
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
 	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
 	int yielded = 0;
+	int try = 3;
 	int pass;
 	int i;
 
@@ -1744,7 +1783,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 	 * VCPU is holding the lock that we need and will release it.
 	 * We approximate round-robin by starting at the last boosted VCPU.
 	 */
-	for (pass = 0; pass < 2 && !yielded; pass++) {
+	for (pass = 0; pass < 2 && !yielded && try; pass++) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (!pass && i <= last_boosted_vcpu) {
 				i = last_boosted_vcpu;
@@ -1757,10 +1796,15 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 				continue;
 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 				continue;
-			if (kvm_vcpu_yield_to(vcpu)) {
+
+			yielded = kvm_vcpu_yield_to(vcpu);
+			if (yielded > 0) {
 				kvm->last_boosted_vcpu = i;
-				yielded = 1;
 				break;
+			} else if (yielded < 0) {
+				try--;
+				if (!try)
+					break;
 			}
 		}
 	}
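The new comment block above documents the four operations that the KVM_SET_USER_MEMORY_REGION ioctl supports. For reference, here is a minimal userspace sketch of driving each of the four cases through the ioctl. This is an illustration, not part of the patch: error handling is omitted, and the guest physical addresses and sizes are arbitrary example values.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int example(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	void *mem = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct kvm_userspace_memory_region region = {
		.slot            = 0,
		.flags           = 0,
		.guest_phys_addr = 0x100000,
		.memory_size     = 0x10000,
		.userspace_addr  = (__u64)(unsigned long)mem,
	};

	/* KVM_MR_CREATE: slot 0 did not exist before. */
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

	/* KVM_MR_MOVE: same size, addr and flags, new base_gfn. */
	region.guest_phys_addr = 0x200000;
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

	/* KVM_MR_FLAGS_ONLY: toggle dirty logging on the same slot. */
	region.flags = KVM_MEM_LOG_DIRTY_PAGES;
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

	/* KVM_MR_DELETE: zero memory_size removes the slot. */
	region.memory_size = 0;
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	return 0;
}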
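The classification in __kvm_set_memory_region() replaces the old scattered validity checks with a single decision table. The same logic, pulled out as a standalone sketch for clarity: the classify_change() helper, the reduced slot struct, and the KVM_MR_INVALID/KVM_MR_NOOP values are hypothetical additions standing in for the function's "goto out" and "r = 0" paths; the decision table itself mirrors the hunk above.

#include <stdio.h>

#define KVM_MEM_READONLY (1u << 1)	/* matches the UAPI flag value */

enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
	KVM_MR_INVALID,	/* hypothetical: stands in for "goto out" */
	KVM_MR_NOOP,	/* hypothetical: stands in for "r = 0; goto out" */
};

struct slot {	/* hypothetical reduction of struct kvm_memory_slot */
	unsigned long userspace_addr;
	unsigned long base_gfn;
	unsigned long npages;
	unsigned int flags;
};

static enum kvm_mr_change classify_change(const struct slot *old,
					  const struct slot *new)
{
	if (new->npages) {
		if (!old->npages)
			return KVM_MR_CREATE;
		/* Modifying an existing slot: the size, the userspace
		 * address and the read-only flag must all stay fixed. */
		if (new->userspace_addr != old->userspace_addr ||
		    new->npages != old->npages ||
		    ((new->flags ^ old->flags) & KVM_MEM_READONLY))
			return KVM_MR_INVALID;
		if (new->base_gfn != old->base_gfn)
			return KVM_MR_MOVE;
		if (new->flags != old->flags)
			return KVM_MR_FLAGS_ONLY;
		return KVM_MR_NOOP;
	}
	/* Deleting an existing slot, or touching a non-existent one. */
	return old->npages ? KVM_MR_DELETE : KVM_MR_INVALID;
}

int main(void)
{
	struct slot old = { 0x7f0000000000ul, 0x100, 16, 0 };
	struct slot moved = old;

	moved.base_gfn = 0x200;
	printf("%d\n", classify_change(&old, &moved)); /* 2 == KVM_MR_MOVE */
	return 0;
}

Once the change is classified, each later step in the function keys off the enum instead of re-deriving the situation from npages and base_gfn, which is what lets the overlap check, the bitmap allocation, and the IOMMU mapping each state their applicable cases explicitly.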
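The kvm_vcpu_on_spin() change treats kvm_vcpu_yield_to()'s result as tri-state: positive means the target was boosted, zero means the target was not a useful yield candidate, and negative means the scheduler-level yield_to() itself failed. The new try counter gives up after three such hard failures instead of scanning every remaining VCPU. A minimal sketch of that control flow, with try_yield() as a hypothetical stub standing in for kvm_vcpu_yield_to():

#include <stdio.h>

/* Hypothetical stub mimicking kvm_vcpu_yield_to()'s tri-state result. */
static int try_yield(int vcpu)
{
	static const int results[] = { 0, -1, 0, 1, 0, 0 };
	return results[vcpu % 6];
}

int main(void)
{
	int yielded = 0;
	int try = 3;	/* tolerate at most three failed yields */
	int pass, i;

	for (pass = 0; pass < 2 && !yielded && try; pass++) {
		for (i = 0; i < 6; i++) {
			yielded = try_yield(i);
			if (yielded > 0) {
				printf("boosted vcpu %d on pass %d\n", i, pass);
				break;
			} else if (yielded < 0) {
				if (!--try)
					break;	/* scheduler keeps failing */
			}
		}
	}
	return 0;
}

With the stub results above, the loop skips the running VCPUs (0), absorbs one hard failure (-1), and boosts VCPU 3 on the first pass; three consecutive hard failures would instead end the scan early via the try counter.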