author     David Matlack <[email protected]>        2023-01-26 10:40:22 -0800
committer  Sean Christopherson <[email protected]>  2023-03-17 15:16:12 -0700
commit     8c63e8c2176552d5c003d7459609383d32bf47f3 (patch)
tree       8a81fc7a6c80e8e63ceb0447123c5416e6639b26
parent     28e4b4597d65927f8147c493d66aa0fe006e364c (diff)
KVM: x86/mmu: Rename kvm_flush_remote_tlbs_with_address()
Rename kvm_flush_remote_tlbs_with_address() to
kvm_flush_remote_tlbs_range(). This name is shorter, which reduces the
number of callsites that need to be broken up across multiple lines,
and more readable since it conveys that a range of memory is being
flushed rather than a single address.

No functional change intended.

Signed-off-by: David Matlack <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
 arch/x86/kvm/mmu/mmu.c          | 14 +++++---------
 arch/x86/kvm/mmu/mmu_internal.h |  7 +++----
 2 files changed, 8 insertions(+), 13 deletions(-)
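For illustration, here is how one callsite from this patch reads before and after the rename (taken from kvm_arch_flush_remote_tlbs_memslot() in the last mmu.c hunk below, shown outside of diff context):

    /* Before: the long name forces the call across two lines. */
    kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
                                       memslot->npages);

    /* After: the shorter name fits on one line and reads as a range flush. */
    kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);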
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ed1df733f12a..b6635da53cb3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -261,8 +261,7 @@ static inline bool kvm_available_flush_tlb_with_range(void)
 	return kvm_x86_ops.tlb_remote_flush_with_range;
 }
 
-void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
-					u64 start_gfn, u64 pages)
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages)
 {
 	struct kvm_tlb_range range;
 	int ret = -EOPNOTSUPP;
@@ -5922,9 +5921,8 @@ slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
 
 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
 			if (flush && flush_on_yield) {
-				kvm_flush_remote_tlbs_with_address(kvm,
-						start_gfn,
-						iterator.gfn - start_gfn + 1);
+				kvm_flush_remote_tlbs_range(kvm, start_gfn,
+							    iterator.gfn - start_gfn + 1);
 				flush = false;
 			}
 			cond_resched_rwlock_write(&kvm->mmu_lock);
@@ -6279,8 +6277,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	}
 
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
-						   gfn_end - gfn_start);
+		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
 
 	kvm_mmu_invalidate_end(kvm, 0, -1ul);
@@ -6669,8 +6666,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 	 * is observed by any other operation on the same memslot.
 	 */
 	lockdep_assert_held(&kvm->slots_lock);
-	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-					   memslot->npages);
+	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
 }
 
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 2cbb155c686c..4b2a1dc43db3 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -170,14 +170,13 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn,
 				    int min_level);
 
-void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
-					u64 start_gfn, u64 pages);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages);
 
 /* Flush the given page (huge or not) of guest memory. */
 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
 {
-	kvm_flush_remote_tlbs_with_address(kvm, gfn_round_for_level(gfn, level),
-					   KVM_PAGES_PER_HPAGE(level));
+	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
+				    KVM_PAGES_PER_HPAGE(level));
 }
 
 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
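As a side note, below is a minimal standalone sketch of the range computation kvm_flush_remote_tlbs_gfn() performs, assuming the usual x86 KVM definitions (KVM_PAGES_PER_HPAGE(level) expands to 1UL << ((level - 1) * 9), and gfn_round_for_level() aligns the gfn down to that size). This is an illustration compiled in userspace, not kernel code, and the helper names here are hypothetical stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* Assumed equivalent of KVM_PAGES_PER_HPAGE(): 512^(level - 1) base
     * pages per huge page (level 1 = 4KiB, level 2 = 2MiB, level 3 = 1GiB).
     */
    static gfn_t pages_per_hpage(int level)
    {
            return 1ULL << ((level - 1) * 9);
    }

    /* Assumed equivalent of gfn_round_for_level(): align the gfn down to
     * the start of the huge page that contains it.
     */
    static gfn_t round_gfn_for_level(gfn_t gfn, int level)
    {
            return gfn & ~(pages_per_hpage(level) - 1);
    }

    int main(void)
    {
            gfn_t gfn = 0x12345;    /* arbitrary guest frame number */
            int level = 2;          /* a 2MiB huge page */

            /* The flush covers the whole huge page containing gfn:
             * start_gfn = 0x12200, pages = 512.
             */
            printf("start_gfn=0x%llx pages=%llu\n",
                   (unsigned long long)round_gfn_for_level(gfn, level),
                   (unsigned long long)pages_per_hpage(level));
            return 0;
    }

This shows why the renamed kvm_flush_remote_tlbs_range() fits the helper: the (start_gfn, pages) pair it receives always describes a whole aligned range, never a single address.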