Diffstat (limited to 'arch/loongarch/kvm/mmu.c')
-rw-r--r--	arch/loongarch/kvm/mmu.c	72
1 file changed, 51 insertions, 21 deletions
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 98883aa23ab8..2634a9e8d82c 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -163,6 +163,7 @@ static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
 
 			child = kvm_mmu_memory_cache_alloc(cache);
 			_kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
+			smp_wmb(); /* Make pte visible before pmd */
 			kvm_set_pte(entry, __pa(child));
 		} else if (kvm_pte_huge(*entry)) {
 			return entry;
@@ -444,6 +445,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   enum kvm_mr_change change)
 {
 	int needs_flush;
+	u32 old_flags = old ? old->flags : 0;
+	u32 new_flags = new ? new->flags : 0;
+	bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
+
+	/* Only track memslot flags changed */
+	if (change != KVM_MR_FLAGS_ONLY)
+		return;
+
+	/* Discard dirty page tracking on readonly memslot */
+	if ((old_flags & new_flags) & KVM_MEM_READONLY)
+		return;
 
 	/*
 	 * If dirty page logging is enabled, write protect all pages in the slot
@@ -454,9 +466,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
 	 *		kvm_arch_flush_shadow_memslot()
 	 */
-	if (change == KVM_MR_FLAGS_ONLY &&
-	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
-	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+	if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) {
+		/*
+		 * Initially-all-set does not require write protecting any page
+		 * because they're all assumed to be dirty.
+		 */
+		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+			return;
+
 		spin_lock(&kvm->mmu_lock);
 		/* Write protect GPA page table entries */
 		needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
@@ -540,6 +557,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_memory_slot *slot;
+	struct page *page;
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -551,10 +569,8 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 	}
 
 	/* Track access to pages marked old */
-	new = *ptep;
-	if (!kvm_pte_young(new))
-		new = kvm_pte_mkyoung(new);
-		/* call kvm_set_pfn_accessed() after unlock */
+	new = kvm_pte_mkyoung(*ptep);
+	/* call kvm_set_pfn_accessed() after unlock */
 
 	if (write && !kvm_pte_dirty(new)) {
 		if (!kvm_pte_write(new)) {
@@ -582,19 +598,22 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 	if (changed) {
 		kvm_set_pte(ptep, new);
 		pfn = kvm_pte_pfn(new);
+		page = kvm_pfn_to_refcounted_page(pfn);
+		if (page)
+			get_page(page);
 	}
 	spin_unlock(&kvm->mmu_lock);
 
-	/*
-	 * Fixme: pfn may be freed after mmu_lock
-	 * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
-	 */
-	if (kvm_pte_young(changed))
-		kvm_set_pfn_accessed(pfn);
+	if (changed) {
+		if (kvm_pte_young(changed))
+			kvm_set_pfn_accessed(pfn);
 
-	if (kvm_pte_dirty(changed)) {
-		mark_page_dirty(kvm, gfn);
-		kvm_set_pfn_dirty(pfn);
+		if (kvm_pte_dirty(changed)) {
+			mark_page_dirty(kvm, gfn);
+			kvm_set_pfn_dirty(pfn);
+		}
+		if (page)
+			put_page(page);
 	}
 	return ret;
 out:
@@ -737,6 +756,7 @@ static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t g
 		val += PAGE_SIZE;
 	}
 
+	smp_wmb(); /* Make pte visible before pmd */
 	/* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
 	kvm_set_pte(ptep, __pa(child));
 
@@ -858,11 +878,21 @@ retry:
 
 	/* Disable dirty logging on HugePages */
 	level = 0;
-	if (!fault_supports_huge_mapping(memslot, hva, write)) {
-		level = 0;
-	} else {
+	if (fault_supports_huge_mapping(memslot, hva, write)) {
+		/* Check page level about host mmu*/
 		level = host_pfn_mapping_level(kvm, gfn, memslot);
 		if (level == 1) {
+			/*
+			 * Check page level about secondary mmu
+			 * Disable hugepage if it is normal page on
+			 * secondary mmu already
+			 */
+			ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
+			if (ptep && !kvm_pte_huge(*ptep))
+				level = 0;
+		}
+
+		if (level == 1) {
 			gfn = gfn & ~(PTRS_PER_PTE - 1);
 			pfn = pfn & ~(PTRS_PER_PTE - 1);
 		}
@@ -892,7 +922,6 @@ retry:
 		kvm_set_pfn_dirty(pfn);
 	}
 
-	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 out:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -908,7 +937,8 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 		return ret;
 
 	/* Invalidate this entry in the TLB */
-	kvm_flush_tlb_gpa(vcpu, gpa);
+	vcpu->arch.flush_gpa = gpa;
+	kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
 
 	return 0;
 }
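The kvm_arch_commit_memory_region() hunk bails out early when the memslot uses manual dirty-log protection with initially-all-set, since every page is then assumed dirty and needs no up-front write protection. For reference, the generic helper it calls is a thin wrapper over the capability bits; the sketch below is reproduced from memory, so check include/linux/kvm_host.h for the exact definition in your tree:

/* Sketch: true when userspace enabled KVM_DIRTY_LOG_INITIALLY_SET, i.e. all
 * pages start out marked dirty and write protection is deferred to the first
 * KVM_CLEAR_DIRTY_LOG pass. */
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}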
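The final hunk in kvm_handle_mm_fault() stops flushing the stale GPA mapping inline; it records the address in vcpu->arch.flush_gpa and raises KVM_REQ_TLB_FLUSH_GPA so the flush happens on the vCPU that re-enters the guest. A minimal sketch of how such a request could be drained before guest entry follows; the helper name kvm_late_check_requests() and the INVALID_GPA reset are illustrative assumptions, not part of this diff:

/* Sketch only: consume the deferred per-GPA flush request before re-entering
 * the guest (assumed helper on the vcpu_run path). */
static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu) &&
	    vcpu->arch.flush_gpa != INVALID_GPA) {
		/* flush_gpa was recorded by kvm_handle_mm_fault() above */
		kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
		vcpu->arch.flush_gpa = INVALID_GPA;
	}
}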