author | Vipin Sharma <vipinsh@google.com> | 2023-03-21 15:00:15 -0700
---|---|---
committer | Sean Christopherson <seanjc@google.com> | 2023-04-04 12:37:30 -0700
commit | e73008705d0c88ac69e3fe262bb03d86af23c521 |
tree | 588a6ddd83832ea76243612d83187e033db9e5f6 /arch/x86/kvm/mmu |
parent | 1e0f42985ffad5c60e7a0a627405e1ff00208289 |
KVM: x86/mmu: Remove "record_dirty_log" in __tdp_mmu_set_spte()
Remove the bool parameter "record_dirty_log" from __tdp_mmu_set_spte() and
refactor the code accordingly, as this parameter is always set to true by its callers.
Signed-off-by: Vipin Sharma <vipinsh@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20230321220021.2119033-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r-- | arch/x86/kvm/mmu/tdp_mmu.c | 24 |
1 file changed, 9 insertions(+), 15 deletions(-)
```diff
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 467931c43968..3cc81fa22b7f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -708,18 +708,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
  *                    notifier for access tracking. Leaving record_acc_track
  *                    unset in that case prevents page accesses from being
  *                    double counted.
- * @record_dirty_log: Record the page as dirty in the dirty bitmap if
- *                    appropriate for the change being made. Should be set
- *                    unless performing certain dirty logging operations.
- *                    Leaving record_dirty_log unset in that case prevents page
- *                    writes from being double counted.
  *
  * Returns the old SPTE value, which _may_ be different than @old_spte if the
  * SPTE had voldatile bits.
  */
 static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
                               u64 old_spte, u64 new_spte, gfn_t gfn, int level,
-                              bool record_acc_track, bool record_dirty_log)
+                              bool record_acc_track)
 {
        lockdep_assert_held_write(&kvm->mmu_lock);
 
@@ -738,35 +733,34 @@ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
        if (record_acc_track)
                handle_changed_spte_acc_track(old_spte, new_spte, level);
-       if (record_dirty_log)
-               handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
-                                             new_spte, level);
+
+       handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte,
+                                     level);
        return old_spte;
 }
 
 static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
-                                    u64 new_spte, bool record_acc_track,
-                                    bool record_dirty_log)
+                                    u64 new_spte, bool record_acc_track)
 {
        WARN_ON_ONCE(iter->yielded);
 
        iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
                                            iter->old_spte, new_spte,
                                            iter->gfn, iter->level,
-                                           record_acc_track, record_dirty_log);
+                                           record_acc_track);
 }
 
 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                     u64 new_spte)
 {
-       _tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
+       _tdp_mmu_set_spte(kvm, iter, new_spte, true);
 }
 
 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
                                                  struct tdp_iter *iter,
                                                  u64 new_spte)
 {
-       _tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
+       _tdp_mmu_set_spte(kvm, iter, new_spte, false);
 }
 
 #define tdp_root_for_each_pte(_iter, _root, _start, _end)      \
@@ -916,7 +910,7 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
                return false;
 
        __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
-                          sp->gfn, sp->role.level + 1, true, true);
+                          sp->gfn, sp->role.level + 1, true);
 
        return true;
 }
```
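The patch follows a common cleanup pattern: a bool parameter that every caller passes as true is dropped, and the call it used to guard becomes unconditional. Below is a minimal, self-contained C sketch of that pattern only; the names (set_spte_before, set_spte_after, handle_dirty_log) and types are simplified stand-ins for illustration and are not part of the KVM code above.

```c
/*
 * Illustrative sketch of the refactor: an always-true bool parameter is
 * removed and the guarded call becomes unconditional.  Stand-in names,
 * not actual KVM code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static void handle_dirty_log(u64 old_spte, u64 new_spte)
{
	printf("dirty log updated: %#llx -> %#llx\n",
	       (unsigned long long)old_spte, (unsigned long long)new_spte);
}

/* Before the patch: every caller passed record_dirty_log == true. */
static u64 set_spte_before(u64 old_spte, u64 new_spte, bool record_dirty_log)
{
	if (record_dirty_log)
		handle_dirty_log(old_spte, new_spte);
	return old_spte;
}

/* After the patch: the parameter is gone, the handler runs unconditionally. */
static u64 set_spte_after(u64 old_spte, u64 new_spte)
{
	handle_dirty_log(old_spte, new_spte);
	return old_spte;
}

int main(void)
{
	/* Both calls behave identically, which is why the parameter could go. */
	set_spte_before(0x1, 0x2, true);
	set_spte_after(0x1, 0x2);
	return 0;
}
```

Since no caller ever skipped the dirty-log update, dropping the parameter removes a dead branch and shortens every call site without changing behavior.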