path: root/arch/arm64/kvm/nested.c
author     Marc Zyngier <[email protected]>    2024-06-14 15:45:39 +0100
committer  Oliver Upton <[email protected]>      2024-06-19 08:13:49 +0000
commit     fd276e71d1e7b7f729050f2da235a1e6fe4f328a (patch)
tree       0f31782e25dc8acad0edd986c9d43a87e628e2c7 /arch/arm64/kvm/nested.c
parent     61e30b9eef7ffc7f88ffd95e969cfb662e41bb05 (diff)
KVM: arm64: nv: Handle shadow stage 2 page faults
If we are faulting on a shadow stage 2 translation, we first walk the
guest hypervisor's stage 2 page table to see if it has a mapping. If
not, we inject a stage 2 page fault to the virtual EL2. Otherwise, we
create a mapping in the shadow stage 2 page table.

Note that we have to deal with two IPAs when we get a shadow stage 2
page fault: one is the address we faulted on, which is in the L2 guest
physical address space; the other comes from the guest stage-2 page
table walk, and is in the L1 guest physical address space. To
differentiate them, we rename variables so that fault_ipa is used for
the former and ipa is used for the latter.

When mapping a page in a shadow stage-2, special care must be taken not
to be more permissive than the guest is.

Co-developed-by: Christoffer Dall <[email protected]>
Co-developed-by: Jintack Lim <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
Signed-off-by: Jintack Lim <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
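For context, the consumer of the helpers added below is the guest abort
path in arch/arm64/kvm/mmu.c. The sketch that follows shows roughly how
the two IPAs and the new helpers fit together; it assumes the
kvm_walk_nested_s2(), kvm_s2_trans_esr() and kvm_s2_trans_output()
helpers introduced elsewhere in this series, and elides locking and
error handling:

	/*
	 * Rough sketch of the shadow stage 2 fault flow (not the verbatim
	 * mmu.c change). fault_ipa is the L2 guest physical address we
	 * faulted on; ipa is the canonical (L1) guest physical address.
	 */
	phys_addr_t fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	phys_addr_t ipa = fault_ipa;
	struct kvm_s2_trans nested_trans;

	if (vcpu->arch.hw_mmu != &vcpu->kvm->arch.mmu) {
		/* Walk the guest hypervisor's stage 2: L2 IPA -> L1 IPA */
		if (kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans))
			/* No valid mapping: reflect the fault to virtual EL2 */
			return kvm_inject_s2_fault(vcpu,
						   kvm_s2_trans_esr(&nested_trans));

		/* Forward permission faults that the guest's stage 2 mandates */
		if (kvm_s2_handle_perm_fault(vcpu, &nested_trans))
			return kvm_inject_s2_fault(vcpu, nested_trans.esr);

		ipa = kvm_s2_trans_output(&nested_trans);
	}
	/* ... map ipa in the shadow stage 2 for faulting address fault_ipa ... */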
Diffstat (limited to 'arch/arm64/kvm/nested.c')
-rw-r--r--  arch/arm64/kvm/nested.c  45
1 file changed, 45 insertions, 0 deletions
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 8c325d0a5636..2a8821674b10 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -121,6 +121,15 @@ static u32 compute_fsc(int level, u32 fsc)
 	return fsc | (level & 0x3);
 }
 
+static int esr_s2_fault(struct kvm_vcpu *vcpu, int level, u32 fsc)
+{
+	u32 esr;
+
+	esr = kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC;
+	esr |= compute_fsc(level, fsc);
+	return esr;
+}
+
 static int get_ia_size(struct s2_walk_info *wi)
 {
 	return 64 - wi->t0sz;
@@ -482,6 +491,42 @@ void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
 	}
 }
 
+/*
+ * Returns non-zero if permission fault is handled by injecting it to the next
+ * level hypervisor.
+ */
+int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
+{
+	bool forward_fault = false;
+
+	trans->esr = 0;
+
+	if (!kvm_vcpu_trap_is_permission_fault(vcpu))
+		return 0;
+
+	if (kvm_vcpu_trap_is_iabt(vcpu)) {
+		forward_fault = !kvm_s2_trans_executable(trans);
+	} else {
+		bool write_fault = kvm_is_write_fault(vcpu);
+
+		forward_fault = ((write_fault && !trans->writable) ||
+				 (!write_fault && !trans->readable));
+	}
+
+	if (forward_fault)
+		trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
+
+	return forward_fault;
+}
+
+int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
+{
+	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
+	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);
+
+	return kvm_inject_nested_sync(vcpu, esr_el2);
+}
+
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	int i;
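The commit message's rule that the shadow mapping must not be more
permissive than the guest is enforced where the mapping is actually
created, by intersecting the permissions KVM would normally grant with
what the L1 guest's own stage 2 grants. A minimal sketch, assuming the
kvm_s2_trans_writable() accessor from this series and locals named
nested, writable and prot as in the mmu.c fault handler:

	/*
	 * Sketch only: clamp the shadow stage 2 permissions with the
	 * permissions returned by the guest stage 2 walk, so the L2
	 * guest never gets more access than the L1 guest allowed.
	 */
	if (nested) {
		writable &= kvm_s2_trans_writable(nested);
		if (!kvm_s2_trans_executable(nested))
			prot &= ~KVM_PGTABLE_PROT_X;
	}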