Diffstat (limited to 'arch/x86/kvm/mmu/tdp_mmu.c')
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c  67
1 file changed, 30 insertions, 37 deletions
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index a54c3491af42..1beb4ca90560 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
 	int level = sp->role.level;
 	gfn_t base_gfn = sp->gfn;
-	u64 old_child_spte;
-	u64 *sptep;
-	gfn_t gfn;
 	int i;

 	trace_kvm_mmu_prepare_zap_page(sp);
@@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 	tdp_mmu_unlink_page(kvm, sp, shared);

 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-		sptep = rcu_dereference(pt) + i;
-		gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+		u64 *sptep = rcu_dereference(pt) + i;
+		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+		u64 old_child_spte;

 		if (shared) {
 			/*
@@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 				    shared);
 	}

-	kvm_flush_remote_tlbs_with_address(kvm, gfn,
+	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
 					   KVM_PAGES_PER_HPAGE(level + 1));

 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -504,6 +502,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
 					   struct tdp_iter *iter,
 					   u64 new_spte)
 {
+	WARN_ON_ONCE(iter->yielded);
+
 	lockdep_assert_held_read(&kvm->mmu_lock);

 	/*
@@ -577,6 +577,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 				      u64 new_spte, bool record_acc_track,
 				      bool record_dirty_log)
 {
+	WARN_ON_ONCE(iter->yielded);
+
 	lockdep_assert_held_write(&kvm->mmu_lock);

 	/*
@@ -642,18 +644,19 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
  * If this function should yield and flush is set, it will perform a remote
  * TLB flush before yielding.
  *
- * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should skip to the next
- * iteration to allow the iterator to continue its traversal from the
- * paging structure root.
+ * If this function yields, iter->yielded is set and the caller must skip to
+ * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
+ * over the paging structures to allow the iterator to continue its traversal
+ * from the paging structure root.
  *
- * Return true if this function yielded and the iterator's traversal was reset.
- * Return false if a yield was not needed.
+ * Returns true if this function yielded.
  */
-static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
-					     struct tdp_iter *iter, bool flush,
-					     bool shared)
+static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
+							  struct tdp_iter *iter,
+							  bool flush, bool shared)
 {
+	WARN_ON(iter->yielded);
+
 	/* Ensure forward progress has been made before yielding. */
 	if (iter->next_last_level_gfn == iter->yielded_gfn)
 		return false;
@@ -673,12 +676,10 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,

 		WARN_ON(iter->gfn > iter->next_last_level_gfn);

-		tdp_iter_restart(iter);
-
-		return true;
+		iter->yielded = true;
 	}

-	return false;
+	return iter->yielded;
 }

 /*
@@ -1033,9 +1034,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
 	struct kvm_mmu_page *root;

-	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
-		flush |= zap_gfn_range(kvm, root, range->start, range->end,
-				       range->may_block, flush, false);
+	for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+		flush = zap_gfn_range(kvm, root, range->start, range->end,
+				      range->may_block, flush, false);

 	return flush;
 }
@@ -1364,10 +1365,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
 				       struct kvm_mmu_page *root,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+				       const struct kvm_memory_slot *slot)
 {
 	gfn_t start = slot->base_gfn;
 	gfn_t end = start + slot->npages;
@@ -1378,10 +1378,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,

 	tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-			flush = false;
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
-		}

 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
@@ -1393,6 +1391,7 @@ retry:
 							    pfn, PG_LEVEL_NUM))
 			continue;

+		/* Note, a successful atomic zap also does a remote TLB flush. */
 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
 			/*
 			 * The iter must explicitly re-read the SPTE because
@@ -1401,30 +1400,24 @@ retry:
 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 			goto retry;
 		}
-		flush = true;
 	}

 	rcu_read_unlock();
-
-	return flush;
 }

 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;

 	lockdep_assert_held_read(&kvm->mmu_lock);

 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-	return flush;
+		zap_collapsible_spte_range(kvm, root, slot);
 }

 /*
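
The hunks above change the yield contract of tdp_mmu_iter_cond_resched(): instead of restarting the walk itself, the helper now only records the yield in iter->yielded, and tdp_iter_next() performs the restart on the next loop iteration, so callers simply continue after a yield. The new WARN_ON_ONCE(iter->yielded) checks catch any path that tries to write an SPTE through an iterator that has already yielded. What follows is a minimal, self-contained toy model of that contract, not kernel code; toy_iter, toy_iter_cond_resched() and toy_iter_next() are hypothetical stand-ins for tdp_iter, tdp_mmu_iter_cond_resched() and tdp_iter_next().

/*
 * Toy model of the iter->yielded contract (NOT kernel code).  All names are
 * hypothetical stand-ins chosen for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_iter {
	unsigned long gfn;		/* current position of the walk */
	unsigned long end;		/* one past the last gfn to visit */
	unsigned long yielded_gfn;	/* last gfn at which the walk yielded */
	bool yielded;			/* set when the walk "dropped the lock" */
};

/* Stand-in for tdp_mmu_iter_cond_resched(): record a yield, don't restart. */
static bool toy_iter_cond_resched(struct toy_iter *iter, bool need_resched)
{
	/* Ensure forward progress has been made before yielding again. */
	if (iter->gfn == iter->yielded_gfn)
		return false;

	if (need_resched) {
		/* Pretend to drop and reacquire the lock here. */
		iter->yielded_gfn = iter->gfn;
		iter->yielded = true;
	}

	return iter->yielded;
}

/* Stand-in for tdp_iter_next(): handle the restart if the last step yielded. */
static void toy_iter_next(struct toy_iter *iter)
{
	if (iter->yielded) {
		/*
		 * A real restart re-walks from the paging structure root; the
		 * toy model just resumes at the same gfn without advancing.
		 */
		iter->yielded = false;
		return;
	}

	iter->gfn++;
}

int main(void)
{
	struct toy_iter iter = {
		.gfn = 0, .end = 5, .yielded_gfn = -1UL, .yielded = false,
	};

	for (; iter.gfn < iter.end; toy_iter_next(&iter)) {
		/* Simulate needing to reschedule when the walk reaches gfn 2. */
		if (toy_iter_cond_resched(&iter, iter.gfn == 2))
			continue;	/* caller just skips, as the new comment requires */

		printf("processing gfn %lu\n", iter.gfn);
	}

	return 0;
}

In the patch itself, the same split shows up as the WARN_ON_ONCE(iter->yielded) guards added to tdp_mmu_set_spte_atomic() and __tdp_mmu_set_spte(): once a walk has yielded, the only legal next step is to advance the iterator, never to modify an SPTE through it.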