author		Sean Christopherson <[email protected]>	2024-08-09 12:43:25 -0700
committer	Sean Christopherson <[email protected]>	2024-09-09 20:22:04 -0700
commit		548f87f667a38ffeb2f021d9cfbc1f1b34fb4cb5
tree		e4776eeced952920bea8ecdfea3f62310191a9b4
parent		dd9eaad744f4ed30913cca423439a1765a760c71
KVM: x86/mmu: Honor NEED_RESCHED when zapping rmaps and blocking is allowed
Convert kvm_unmap_gfn_range(), which is the helper that zaps rmap SPTEs in
response to an mmu_notifier invalidation, to use __kvm_rmap_zap_gfn_range()
and feed in range->may_block. In other words, honor NEED_RESCHED by way of
cond_resched() when zapping rmaps. This fixes a long-standing issue where
KVM could process an absurd number of rmap entries without ever yielding,
e.g. if an mmu_notifier fired on a PUD (or larger) range.
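
For context, the yield itself happens in __walk_slot_rmaps()'s iterator
loop; a simplified sketch of that loop (not the verbatim kernel code) is:

	for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
				 end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap, slot);

		if (!can_yield)
			continue;

		/* Reschedule if needed, flushing pending TLB work first. */
		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			if (flush && flush_on_yield) {
				kvm_flush_remote_tlbs_range(kvm, start_gfn,
						iterator.gfn - start_gfn + 1);
				flush = false;
			}
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
	}

Feeding range->may_block through as can_yield is what finally lets that
check run for mmu_notifier invalidations.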
Opportunistically rename __kvm_zap_rmap() to kvm_zap_rmap(), and drop the
old kvm_zap_rmap(). Ideally, the shuffling would be done in a different
patch, but that just makes the compiler unhappy, e.g.
arch/x86/kvm/mmu/mmu.c:1462:13: error: ‘kvm_zap_rmap’ defined but not used
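
(A static function with no remaining callers is fatal in kernel builds
with -Werror; a minimal, hypothetical reproducer:

	/* foo.c, built with gcc -c -Wall -Werror foo.c */
	static int unused_helper(void)	/* error: defined but not used */
	{
		return 0;
	}

so the rename and the deletion of the old helper have to land together.)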
Reported-by: Peter Xu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
 arch/x86/kvm/mmu/mmu.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 70b043d7701d..27a8a4f486c5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1435,16 +1435,10 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
-static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			   const struct kvm_memory_slot *slot)
-{
-	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
-}
-
 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			 struct kvm_memory_slot *slot, gfn_t gfn, int level)
+			 const struct kvm_memory_slot *slot)
 {
-	return __kvm_zap_rmap(kvm, rmap_head, slot);
+	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
 }
 
 struct slot_rmap_walk_iterator {
@@ -1578,7 +1572,7 @@ static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
 				     gfn_t start, gfn_t end,
 				     bool can_yield, bool flush)
 {
-	return __walk_slot_rmaps(kvm, slot, __kvm_zap_rmap,
+	return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
 				 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
 				 start, end - 1, can_yield, true, flush);
 }
@@ -1607,7 +1601,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 	bool flush = false;
 
 	if (kvm_memslots_have_rmaps(kvm))
-		flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
+		flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
+						 range->start, range->end,
+						 range->may_block, flush);
 
 	if (tdp_mmu_enabled)
 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);