Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	54
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8b47507faab5..f986e31fa68c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -140,9 +140,10 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
-__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-		unsigned long start, unsigned long end)
+__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end, bool blockable)
 {
+	return 0;
 }
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
@@ -273,7 +274,8 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
 	 * barrier here.
 	 */
-	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+	if (!kvm_arch_flush_remote_tlb(kvm)
+	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
@@ -359,13 +361,15 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
-static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 						    struct mm_struct *mm,
 						    unsigned long start,
-						    unsigned long end)
+						    unsigned long end,
+						    bool blockable)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 	int need_tlb_flush = 0, idx;
+	int ret;
 
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
@@ -383,9 +387,11 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 
 	spin_unlock(&kvm->mmu_lock);
 
-	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, blockable);
 
 	srcu_read_unlock(&kvm->srcu, idx);
+
+	return ret;
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -1169,7 +1175,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 
 	n = kvm_dirty_bitmap_bytes(memslot);
 
-	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
 	memset(dirty_bitmap_buffer, 0, n);
 
 	spin_lock(&kvm->mmu_lock);
@@ -1342,18 +1348,16 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 }
 
 /*
- * The atomic path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.
+ * The fast path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned.  It's also the
+ * only part that runs if we are in atomic context.
  */
-static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
-			    bool write_fault, bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
+			    bool *writable, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
 	int npages;
 
-	if (!(async || atomic))
-		return false;
-
 	/*
 	 * Fast pin a writable pfn only if it is a write fault request
 	 * or the caller allows to map a writable pfn for a read fault
@@ -1497,7 +1501,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	/* we can do it either atomically or asynchronously, not both */
 	BUG_ON(atomic && async);
 
-	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
 		return pfn;
 
 	if (atomic)
@@ -2127,16 +2131,22 @@ static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
+	int ret = -EINTR;
+	int idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (kvm_arch_vcpu_runnable(vcpu)) {
 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
-		return -EINTR;
+		goto out;
 	}
 	if (kvm_cpu_has_pending_timer(vcpu))
-		return -EINTR;
+		goto out;
 	if (signal_pending(current))
-		return -EINTR;
+		goto out;
 
-	return 0;
+	ret = 0;
+out:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	return ret;
 }
 
 /*
@@ -2172,7 +2182,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_blocking(vcpu);
 
 	for (;;) {
-		prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
 		if (kvm_vcpu_check_block(vcpu) < 0)
 			break;
@@ -2214,7 +2224,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 
 	wqp = kvm_arch_vcpu_wq(vcpu);
 	if (swq_has_sleeper(wqp)) {
-		swake_up(wqp);
+		swake_up_one(wqp);
 		++vcpu->stat.halt_wakeup;
 		return true;
 	}
@@ -2563,7 +2573,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		if (arg)
 			goto out;
 		oldpid = rcu_access_pointer(vcpu->pid);
-		if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
+		if (unlikely(oldpid != task_pid(current))) {
 			/* The thread running this VCPU changed. */
 			struct pid *newpid;
 