author     Oliver Upton <[email protected]>  2024-10-07 23:30:26 +0000
committer  Marc Zyngier <[email protected]>  2024-10-08 10:40:27 +0100
commit     3c164eb9464d39ba339c1487dcac0dc9508e03f0 (patch)
tree       9d2898b907c88e59b8c17a32d3f903c2c6f6cb89 /arch/arm64/kvm/nested.c
parent     6ded46b5a4fd7fc9c6104b770627043aaf996abf (diff)
KVM: arm64: nv: Do not block when unmapping stage-2 if disallowed
Right now the nested code allows unmap operations on a shadow stage-2 to
block unconditionally. This is wrong in a couple of places, such as a
non-blocking MMU notifier or on the back of a sched_in() notifier as part
of shadow MMU recycling.

Carry through whether or not blocking is allowed to
kvm_pgtable_stage2_unmap(). This 'fixes' an issue where stage-2 MMU reclaim
would precipitate a stack overflow from a pile of kvm_sched_in() callbacks,
all trying to recycle a stage-2 MMU.

Signed-off-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
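
To make the blocking contract concrete, here is a minimal sketch of the pattern the message describes: the caller decides whether sleeping is permitted and the range walk merely obeys. This is not the real kvm_pgtable walker; example_unmap_range(), unmap_one_block() and EXAMPLE_CHUNK are invented for illustration, and only the may_block parameter mirrors the actual change.

/*
 * Minimal sketch of the "carry may_block down from the caller" pattern.
 * example_unmap_range(), unmap_one_block() and EXAMPLE_CHUNK are
 * hypothetical stand-ins for the real kvm_pgtable_stage2_unmap() path.
 */
#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/sizes.h>

#define EXAMPLE_CHUNK	SZ_2M	/* arbitrary walk granule for the sketch */

static void unmap_one_block(struct kvm_s2_mmu *mmu, u64 addr);	/* hypothetical helper */

static void example_unmap_range(struct kvm_s2_mmu *mmu, u64 start, u64 size,
				bool may_block)
{
	u64 addr, end = start + size;

	for (addr = start; addr < end; addr += EXAMPLE_CHUNK) {
		unmap_one_block(mmu, addr);

		/*
		 * Yield only when the caller said sleeping is allowed; a
		 * sched_in() notifier or a non-blocking MMU notifier passes
		 * may_block == false and the loop stays atomic.
		 */
		if (may_block)
			cond_resched();
	}
}

The point is that only the caller knows whether its context can sleep, so the walker never guesses.
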
Diffstat (limited to 'arch/arm64/kvm/nested.c')
-rw-r--r--  arch/arm64/kvm/nested.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index df670c14e1c6..58d3b998793a 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -634,7 +634,7 @@ static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
 
 	/* Clear the old state */
 	if (kvm_s2_mmu_valid(s2_mmu))
-		kvm_stage2_unmap_range(s2_mmu, 0, kvm_phys_size(s2_mmu));
+		kvm_stage2_unmap_range(s2_mmu, 0, kvm_phys_size(s2_mmu), false);
 
 	/*
 	 * The virtual VMID (modulo CnP) will be used as a key when matching
@@ -745,7 +745,7 @@ void kvm_nested_s2_wp(struct kvm *kvm)
 	}
 }
 
-void kvm_nested_s2_unmap(struct kvm *kvm)
+void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
 {
 	int i;
 
@@ -755,7 +755,7 @@ void kvm_nested_s2_unmap(struct kvm *kvm)
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
 
 		if (kvm_s2_mmu_valid(mmu))
-			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu));
+			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
 	}
 }
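
For context on the caller side, here is a hedged sketch, not a quote from this series: the function lives in arch/arm64/kvm/mmu.c rather than in this file, and its exact body may differ. The generic MMU-notifier path already records whether it may block in struct kvm_gfn_range, so that bit can simply be forwarded to the nested unmap instead of hard-coding "blocking allowed".

/*
 * Illustrative caller-side sketch (not part of this diff): forward the
 * notifier's may_block decision into the nested unmap instead of always
 * assuming that blocking is fine.
 */
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	kvm_stage2_unmap_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
			       (range->end - range->start) << PAGE_SHIFT,
			       range->may_block);

	kvm_nested_s2_unmap(kvm, range->may_block);

	return false;
}

get_s2_mmu_nested(), by contrast, runs off the back of vcpu load/sched_in and therefore passes false explicitly, as the first hunk above shows.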