Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	12
1 files changed, 12 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 40772ef0f2b1..e50425d0f5f7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2659,6 +2659,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 	int emulate = 0;
 	gfn_t pseudo_gfn;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return 0;
+
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
@@ -2829,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	bool ret = false;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return false;
+
 	if (!page_fault_can_be_fast(error_code))
 		return false;
 
@@ -3224,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return spte;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 		if (!is_shadow_present_pte(spte))
@@ -4510,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 	u64 spte;
 	int nr_sptes = 0;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return nr_sptes;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 		sptes[iterator.level-1] = spte;
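
For context, every hunk above applies the same guard: each shadow-page-table walker checks vcpu->arch.mmu.root_hpa with VALID_PAGE() and returns early (0, false, an empty spte, or zero sptes) rather than walking from an invalid root. Below is a minimal standalone sketch of that pattern, not the kernel code itself: struct mmu_ctx, walk_shadow() and the values printed in main() are illustrative assumptions, while VALID_PAGE()/INVALID_PAGE follow their usual KVM definitions.

/*
 * Minimal sketch of the early-return guard added in the patch above.
 * mmu_ctx and walk_shadow() are hypothetical stand-ins for the KVM
 * shadow-walk entry points in arch/x86/kvm/mmu.c.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hpa_t;

#define INVALID_PAGE	(~(hpa_t)0)
#define VALID_PAGE(x)	((x) != INVALID_PAGE)

struct mmu_ctx {
	hpa_t root_hpa;		/* root of the shadow page table, or INVALID_PAGE */
};

/* Walk the shadow page table; bail out if the root has been torn down. */
static int walk_shadow(struct mmu_ctx *mmu)
{
	if (!VALID_PAGE(mmu->root_hpa))
		return 0;	/* nothing to walk; caller sees "no mapping" */

	/* ... iterate over shadow entries starting at mmu->root_hpa ... */
	return 1;
}

int main(void)
{
	struct mmu_ctx mmu = { .root_hpa = INVALID_PAGE };

	printf("walk with invalid root: %d\n", walk_shadow(&mmu));	/* prints 0 */
	mmu.root_hpa = 0x1000;
	printf("walk with valid root:   %d\n", walk_shadow(&mmu));	/* prints 1 */
	return 0;
}

The point of checking at the top of each walker, rather than at a single call site, is that these paths can be reached (including locklessly) while the root is being invalidated, so each entry point has to tolerate an invalid root_hpa on its own.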