Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	156
1 file changed, 48 insertions(+), 108 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d31724500501..72c4e6b39389 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 
 static int kvm_set_memslot(struct kvm *kvm,
 			   const struct kvm_userspace_memory_region *mem,
-			   struct kvm_memory_slot *old,
 			   struct kvm_memory_slot *new, int as_id,
 			   enum kvm_mr_change change)
 {
-	struct kvm_memory_slot *slot;
+	struct kvm_memory_slot *slot, old;
 	struct kvm_memslots *slots;
 	int r;
 
@@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * Note, the INVALID flag needs to be in the appropriate entry
 		 * in the freshly allocated memslots, not in @old or @new.
 		 */
-		slot = id_to_memslot(slots, old->id);
+		slot = id_to_memslot(slots, new->id);
 		slot->flags |= KVM_MEMSLOT_INVALID;
 
 		/*
@@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
 		kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
 	}
 
+	/*
+	 * Make a full copy of the old memslot, the pointer will become stale
+	 * when the memslots are re-sorted by update_memslots(), and the old
+	 * memslot needs to be referenced after calling update_memslots(), e.g.
+	 * to free its resources and for arch specific behavior.  This needs to
+	 * happen *after* (re)acquiring slots_arch_lock.
+	 */
+	slot = id_to_memslot(slots, new->id);
+	if (slot) {
+		old = *slot;
+	} else {
+		WARN_ON_ONCE(change != KVM_MR_CREATE);
+		memset(&old, 0, sizeof(old));
+		old.id = new->id;
+		old.as_id = as_id;
+	}
+
+	/* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+	memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
 	if (r)
 		goto out_slots;
@@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
 	update_memslots(slots, new, change);
 	slots = install_new_memslots(kvm, as_id, slots);
 
-	kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+	kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+	/* Free the old memslot's metadata.  Note, this is the full copy!!! */
+	if (change == KVM_MR_DELETE)
+		kvm_free_memslot(kvm, &old);
 
 	kvfree(slots);
 	return 0;
 
 out_slots:
 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-		slot = id_to_memslot(slots, old->id);
+		slot = id_to_memslot(slots, new->id);
 		slot->flags &= ~KVM_MEMSLOT_INVALID;
 		slots = install_new_memslots(kvm, as_id, slots);
 	} else {
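The comment added above is the crux of this change: update_memslots() re-sorts the memslots array, so any pointer into it, including the @old pointer the caller used to pass in, goes stale across that call. Copying the old memslot by value before the sort keeps its contents usable afterwards, for kvm_arch_commit_memory_region() and for freeing on delete. Below is a minimal userspace sketch of the hazard, not kernel code; the struct and names are made up for illustration:

#include <stdio.h>
#include <stdlib.h>

struct slot { int id; unsigned long base; };

static int cmp_base(const void *a, const void *b)
{
	const struct slot *x = a, *y = b;

	return (x->base > y->base) - (x->base < y->base);
}

int main(void)
{
	struct slot slots[3] = { {0, 300}, {1, 100}, {2, 200} };
	struct slot *stale = &slots[0];	/* points at id 0 (base 300) */
	struct slot old = *stale;	/* full copy survives the sort */

	qsort(slots, 3, sizeof(slots[0]), cmp_base);

	/* 'stale' now points at whichever entry sorted first. */
	printf("via stale pointer: id=%d\n", stale->id);	/* id=1: wrong */
	printf("via full copy:     id=%d\n", old.id);		/* id=0: right */
	return 0;
}

With the delete path freeing the full copy inside kvm_set_memslot(), kvm_delete_memslot() below can simply forward the call: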
@@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
 			      struct kvm_memory_slot *old, int as_id)
 {
 	struct kvm_memory_slot new;
-	int r;
 
 	if (!old->npages)
 		return -EINVAL;
@@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
 	 */
 	new.as_id = as_id;
 
-	r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
-	if (r)
-		return r;
-
-	kvm_free_memslot(kvm, old);
-	return 0;
+	return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
 }
 
 /*
@@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	id = (u16)mem->slot;
 
 	/* General sanity checks */
-	if (mem->memory_size & (PAGE_SIZE - 1))
+	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+	    (mem->memory_size != (unsigned long)mem->memory_size))
 		return -EINVAL;
 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
 		return -EINVAL;
@@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (!old.npages) {
 		change = KVM_MR_CREATE;
 		new.dirty_bitmap = NULL;
-		memset(&new.arch, 0, sizeof(new.arch));
 	} else { /* Modify an existing slot. */
 		if ((new.userspace_addr != old.userspace_addr) ||
 		    (new.npages != old.npages) ||
@@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		else /* Nothing to change. */
 			return 0;
 
-		/* Copy dirty_bitmap and arch from the current memslot. */
+		/* Copy dirty_bitmap from the current memslot. */
 		new.dirty_bitmap = old.dirty_bitmap;
-		memcpy(&new.arch, &old.arch, sizeof(new.arch));
 	}
 
 	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			bitmap_set(new.dirty_bitmap, 0, new.npages);
 	}
 
-	r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+	r = kvm_set_memslot(kvm, mem, &new, as_id, change);
 	if (r)
 		goto out_bitmap;
@@ -2548,72 +2564,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 {
 	if (pfn == 0)
 		return;
 
-	if (cache)
-		cache->pfn = cache->gfn = 0;
-
 	if (dirty)
 		kvm_release_pfn_dirty(pfn);
 	else
 		kvm_release_pfn_clean(pfn);
 }
 
-static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-				 struct gfn_to_pfn_cache *cache, u64 gen)
-{
-	kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
-	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-	cache->gfn = gfn;
-	cache->dirty = false;
-	cache->generation = gen;
-}
-
-static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map,
-			 struct gfn_to_pfn_cache *cache,
-			 bool atomic)
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
-	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	if (cache) {
-		if (!cache->pfn || cache->gfn != gfn ||
-			cache->generation != gen) {
-			if (atomic)
-				return -EAGAIN;
-			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-		}
-		pfn = cache->pfn;
-	} else {
-		if (atomic)
-			return -EAGAIN;
-		pfn = gfn_to_pfn_memslot(slot, gfn);
-	}
+	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		if (atomic)
-			hva = kmap_atomic(page);
-		else
-			hva = kmap(page);
+		hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else if (!atomic) {
-		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 	} else {
-		return -EINVAL;
+		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 #endif
 	}
@@ -2627,27 +2607,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 	return 0;
 }
-
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic)
-{
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-			cache, atomic);
-}
-EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-{
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-		NULL, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-static void __kvm_unmap_gfn(struct kvm *kvm,
-			struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map,
-			struct gfn_to_pfn_cache *cache,
-			bool dirty, bool atomic)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -2655,45 +2617,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE) {
-		if (atomic)
-			kunmap_atomic(map->hva);
-		else
-			kunmap(map->page);
-	}
+	if (map->page != KVM_UNMAPPED_PAGE)
+		kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
-	else if (!atomic)
-		memunmap(map->hva);
 	else
-		WARN_ONCE(1, "Unexpected unmapping in atomic context");
+		memunmap(map->hva);
 #endif
 
 	if (dirty)
-		mark_page_dirty_in_slot(kvm, memslot, map->gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-	if (cache)
-		cache->dirty |= dirty;
-	else
-		kvm_release_pfn(map->pfn, dirty, NULL);
+	kvm_release_pfn(map->pfn, dirty);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
-
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
-{
-	__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
-			cache, dirty, atomic);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-{
-	__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
-			map, NULL, dirty, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
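With the gfn_to_pfn_cache and atomic variants removed, kvm_vcpu_map()/kvm_vcpu_unmap() are the only remaining entry points for this kind of guest-page access. A sketch of the usual call pattern, modeled on in-tree callers; the helper function itself is hypothetical and not part of this patch:

/* Hypothetical caller illustrating the surviving map/unmap API. */
static int zero_guest_page(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		return -EFAULT;		/* no usable backing page for this gfn */

	/* map.hva is a temporary kernel mapping of the guest page. */
	memset(map.hva, 0, PAGE_SIZE);

	/* dirty=true so dirty logging and migration observe the write. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}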
@@ -2993,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	int r;
 	gpa_t gpa = ghc->gpa + offset;
 
-	BUG_ON(len + offset > ghc->len);
+	if (WARN_ON_ONCE(len + offset > ghc->len))
+		return -EINVAL;
 
 	if (slots->generation != ghc->generation) {
 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -3030,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	int r;
 	gpa_t gpa = ghc->gpa + offset;
 
-	BUG_ON(len + offset > ghc->len);
+	if (WARN_ON_ONCE(len + offset > ghc->len))
+		return -EINVAL;
 
 	if (slots->generation != ghc->generation) {
 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
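The final two hunks convert BUG_ON() into WARN_ON_ONCE() plus an error return: an oversized cached access now fails just that call instead of panicking the host. The conversion works because WARN_ON_ONCE() evaluates to its condition, so it can gate an early return. A userspace model of the pattern; the macro below is a simplified stand-in, not the kernel's real definition:

#include <stdio.h>

/* Simplified stand-in: warn once per call site, evaluate to the condition. */
#define WARN_ON_ONCE(cond) ({						\
	static int __warned;						\
	int __ret = !!(cond);						\
	if (__ret && !__warned) {					\
		__warned = 1;						\
		fprintf(stderr, "WARNING: %s\n", #cond);		\
	}								\
	__ret;								\
})

#define EINVAL 22

static int write_cached(unsigned long offset, unsigned long len,
			unsigned long cached_len)
{
	/* Fail this one call gracefully instead of killing the process. */
	if (WARN_ON_ONCE(len + offset > cached_len))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", write_cached(0, 8, 16));		/* 0 */
	printf("%d\n", write_cached(12, 8, 16));	/* -22, warns once */
	printf("%d\n", write_cached(12, 8, 16));	/* -22, no second warning */
	return 0;
}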