author     Marc Zyngier <[email protected]>  2020-11-27 11:33:27 +0000
committer  Marc Zyngier <[email protected]>  2020-11-27 11:33:27 +0000
commit     8c38602fb3ebe12ff8c63588147ed1cfb6890733 (patch)
tree       ffda9905174ba84cda3363405205b619b17bae1a
parent     149f120edbf89d894bd4e2c83ad7aa55c780d569 (diff)
parent     83fa381f66ccb025f9e182d0b2ef9cd53c1f33ab (diff)
Merge branch 'kvm-arm64/host-hvc-table' into kvmarm-master/next
Signed-off-by: Marc Zyngier <[email protected]>
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h     |   2
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h     |  40
-rw-r--r--  arch/arm64/include/asm/sysreg.h      |   1
-rw-r--r--  arch/arm64/kernel/image-vars.h       |   5
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/host.S       |  11
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c   | 232
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/sysreg-sr.c  |  11
-rw-r--r--  arch/arm64/kvm/va_layout.c           |  56
8 files changed, 241 insertions, 117 deletions
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 54387ccd1ab2..a542c422a036 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -189,8 +189,6 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-extern void __kvm_enable_ssbs(void);
-
extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 331394306cce..5633c5a27aad 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -72,6 +72,28 @@ alternative_cb kvm_update_va_mask
alternative_cb_end
.endm
+/*
+ * Convert a kernel image address to a PA
+ * reg: kernel address to be converted in place
+ * tmp: temporary register
+ *
+ * The actual code generation takes place in kvm_get_kimage_voffset, and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_get_kimage_voffset uses the
+ * specific registers encoded in the instructions).
+ */
+.macro kimg_pa reg, tmp
+alternative_cb kvm_get_kimage_voffset
+ movz \tmp, #0
+ movk \tmp, #0, lsl #16
+ movk \tmp, #0, lsl #32
+ movk \tmp, #0, lsl #48
+alternative_cb_end
+
+ /* reg = __pa(reg) */
+ sub \reg, \reg, \tmp
+.endm
+
#else
#include <linux/pgtable.h>
@@ -98,6 +120,24 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
+{
+ unsigned long offset;
+
+ asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+ "movk %0, #0, lsl #16\n"
+ "movk %0, #0, lsl #32\n"
+ "movk %0, #0, lsl #48\n",
+ kvm_update_kimg_phys_offset)
+ : "=r" (offset));
+
+ return __kern_hyp_va((v - offset) | PAGE_OFFSET);
+}
+
+#define kimg_fn_hyp_va(v) ((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
+
+#define kimg_fn_ptr(x) (typeof(x) **)(x)
+
/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
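
[Editor's illustration, not part of the commit] For context on the two new helpers in this hunk: kvm_get_kimage_voffset patches in kimage_voffset so the kimg_pa macro can compute reg = reg - voffset, while kvm_update_kimg_phys_offset patches in kimage_voffset + PHYS_OFFSET so __kimg_hyp_va() can turn a kernel-image address into its linear-map alias ((v - offset) | PAGE_OFFSET) before handing it to kern_hyp_va(). A standalone sketch of that arithmetic, using made-up layout constants and a stubbed-out kern_hyp_va():

#include <stdint.h>
#include <stdio.h>

/* Made-up layout constants, for illustration only. */
#define PHYS_OFFSET	0x40000000UL		/* base of RAM */
#define PAGE_OFFSET	0xffff000000000000UL	/* start of the linear map */
#define KIMAGE_PHYS	0x40210000UL		/* where the image was loaded */
#define KIMAGE_VADDR	0xffff800010000000UL	/* where the image is mapped */

#define kimage_voffset	(KIMAGE_VADDR - KIMAGE_PHYS)

/* Stand-in for kern_hyp_va(); the real helper applies the hyp VA mask/tag. */
static uint64_t kern_hyp_va(uint64_t linear_va)
{
	return linear_va;
}

static uint64_t kimg_hyp_va(uint64_t v)
{
	/* The constant patched in by kvm_update_kimg_phys_offset. */
	uint64_t offset = kimage_voffset + PHYS_OFFSET;

	/*
	 * v - offset == __pa(v) - PHYS_OFFSET, i.e. the page's offset from
	 * the base of RAM; OR-ing in PAGE_OFFSET rebuilds the corresponding
	 * linear-map address, which kern_hyp_va() would then remap to hyp.
	 */
	return kern_hyp_va((v - offset) | PAGE_OFFSET);
}

int main(void)
{
	uint64_t fn = KIMAGE_VADDR + 0x1234;	/* some image-mapped symbol */

	printf("kimg %#lx -> linear/hyp %#lx\n",
	       (unsigned long)fn, (unsigned long)kimg_hyp_va(fn));
	return 0;
}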
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e2ef4c2edf06..500efe405b48 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -465,6 +465,7 @@
#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
+#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index c615b285ff5b..4b32588918d9 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -64,13 +64,12 @@ __efistub__ctype = _ctype;
/* Alternative callbacks for init-time patching of nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
+KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
+KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
-/* Kernel constant needed to compute idmap addresses. */
-KVM_NVHE_ALIAS(kimage_voffset);
-
/* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */
KVM_NVHE_ALIAS(__hyp_panic_string);
KVM_NVHE_ALIAS(panic);
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index ed27f06a31ba..fe2740b224cf 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -13,8 +13,6 @@
.text
SYM_FUNC_START(__host_exit)
- stp x0, x1, [sp, #-16]!
-
get_host_ctxt x0, x1
/* Store the host regs x2 and x3 */
@@ -99,13 +97,15 @@ SYM_FUNC_END(__hyp_do_panic)
mrs x0, esr_el2
lsr x0, x0, #ESR_ELx_EC_SHIFT
cmp x0, #ESR_ELx_EC_HVC64
- ldp x0, x1, [sp], #16
b.ne __host_exit
+ ldp x0, x1, [sp] // Don't fixup the stack yet
+
/* Check for a stub HVC call */
cmp x0, #HVC_STUB_HCALL_NR
b.hs __host_exit
+ add sp, sp, #16
/*
* Compute the idmap address of __kvm_handle_stub_hvc and
* jump there. Since we use kimage_voffset, do not use the
@@ -115,10 +115,7 @@ SYM_FUNC_END(__hyp_do_panic)
* Preserve x0-x4, which may contain stub parameters.
*/
ldr x5, =__kvm_handle_stub_hvc
- ldr_l x6, kimage_voffset
-
- /* x5 = __pa(x5) */
- sub x5, x5, x6
+ kimg_pa x5, x6
br x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index e2eafe2c93af..82df7fc24760 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -12,106 +12,150 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
-
-static void handle_host_hcall(unsigned long func_id,
- struct kvm_cpu_context *host_ctxt)
-{
- unsigned long ret = 0;
-
- switch (func_id) {
- case KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run): {
- unsigned long r1 = host_ctxt->regs.regs[1];
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1;
-
- ret = __kvm_vcpu_run(kern_hyp_va(vcpu));
- break;
- }
- case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context):
- __kvm_flush_vm_context();
- break;
- case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_ipa): {
- unsigned long r1 = host_ctxt->regs.regs[1];
- struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
- phys_addr_t ipa = host_ctxt->regs.regs[2];
- int level = host_ctxt->regs.regs[3];
-
- __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
- break;
- }
- case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
- unsigned long r1 = host_ctxt->regs.regs[1];
- struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
-
- __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
- break;
- }
- case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
- unsigned long r1 = host_ctxt->regs.regs[1];
- struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
-
- __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
- break;
- }
- case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
- u64 cntvoff = host_ctxt->regs.regs[1];
-
- __kvm_timer_set_cntvoff(cntvoff);
- break;
- }
- case KVM_HOST_SMCCC_FUNC(__kvm_enable_ssbs):
- __kvm_enable_ssbs();
- break;
- case KVM_HOST_SMCCC_FUNC(__vgic_v3_get_ich_vtr_el2):
- ret = __vgic_v3_get_ich_vtr_el2();
- break;
- case KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr):
- ret = __vgic_v3_read_vmcr();
- break;
- case KVM_HOST_SMCCC_FUNC(__vgic_v3_write_vmcr): {
- u32 vmcr = host_ctxt->regs.regs[1];
-
- __vgic_v3_write_vmcr(vmcr);
- break;
- }
- case KVM_HOST_SMCCC_FUNC(__vgic_v3_init_lrs):
- __vgic_v3_init_lrs();
- break;
- case KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2):
- ret = __kvm_get_mdcr_el2();
- break;
- case KVM_HOST_SMCCC_FUNC(__vgic_v3_save_aprs): {
- unsigned long r1 = host_ctxt->regs.regs[1];
- struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
-
- __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
- break;
- }
- case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): {
- unsigned long r1 = host_ctxt->regs.regs[1];
- struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
-
- __vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
- break;
- }
- default:
- /* Invalid host HVC. */
- host_ctxt->regs.regs[0] = SMCCC_RET_NOT_SUPPORTED;
- return;
- }
-
- host_ctxt->regs.regs[0] = SMCCC_RET_SUCCESS;
- host_ctxt->regs.regs[1] = ret;
+#define cpu_reg(ctxt, r) (ctxt)->regs.regs[r]
+#define DECLARE_REG(type, name, ctxt, reg) \
+ type name = (type)cpu_reg(ctxt, (reg))
+
+static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+}
+
+static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
+{
+ __kvm_flush_vm_context();
+}
+
+static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
+ DECLARE_REG(int, level, host_ctxt, 3);
+
+ __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
+}
+
+static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+
+ __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
+}
+
+static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+
+ __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+}
+
+static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
+{
+ __kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
+}
+
+static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
+{
+ u64 tmp;
+
+ tmp = read_sysreg_el2(SYS_SCTLR);
+ tmp |= SCTLR_ELx_DSSBS;
+ write_sysreg_el2(tmp, SYS_SCTLR);
+}
+
+static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
+{
+ cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
+}
+
+static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
+{
+ cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
+}
+
+static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
+{
+ __vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
+}
+
+static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
+{
+ __vgic_v3_init_lrs();
+}
+
+static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
+{
+ cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
+}
+
+static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
+
+ __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
+}
+
+static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
+
+ __vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
+}
+
+typedef void (*hcall_t)(struct kvm_cpu_context *);
+
+#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x)
+
+static const hcall_t *host_hcall[] = {
+ HANDLE_FUNC(__kvm_vcpu_run),
+ HANDLE_FUNC(__kvm_flush_vm_context),
+ HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
+ HANDLE_FUNC(__kvm_tlb_flush_vmid),
+ HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
+ HANDLE_FUNC(__kvm_timer_set_cntvoff),
+ HANDLE_FUNC(__kvm_enable_ssbs),
+ HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
+ HANDLE_FUNC(__vgic_v3_read_vmcr),
+ HANDLE_FUNC(__vgic_v3_write_vmcr),
+ HANDLE_FUNC(__vgic_v3_init_lrs),
+ HANDLE_FUNC(__kvm_get_mdcr_el2),
+ HANDLE_FUNC(__vgic_v3_save_aprs),
+ HANDLE_FUNC(__vgic_v3_restore_aprs),
+};
+
+static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned long, id, host_ctxt, 0);
+ const hcall_t *kfn;
+ hcall_t hfn;
+
+ id -= KVM_HOST_SMCCC_ID(0);
+
+ if (unlikely(id >= ARRAY_SIZE(host_hcall)))
+ goto inval;
+
+ kfn = host_hcall[id];
+ if (unlikely(!kfn))
+ goto inval;
+
+ cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
+
+ hfn = kimg_fn_hyp_va(kfn);
+ hfn(host_ctxt);
+
+ return;
+inval:
+ cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
u64 esr = read_sysreg_el2(SYS_ESR);
- unsigned long func_id;
- if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
+ if (unlikely(ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64))
hyp_panic();
- func_id = host_ctxt->regs.regs[0];
- handle_host_hcall(func_id, host_ctxt);
+ handle_host_hcall(host_ctxt);
}
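
[Editor's illustration, not part of the commit] The hyp-main.c rework above replaces the big switch with a table of handlers indexed by SMCCC function ID; each entry is stored as a kernel-image pointer (kimg_fn_ptr) and only converted to a hyp address (kimg_fn_hyp_va) when it is about to be called. A minimal standalone sketch of the same table-driven dispatch, with hypothetical IDs, a plain struct standing in for kvm_cpu_context, and no kimg/hyp address conversion:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for the saved host register file. */
struct ctx {
	unsigned long regs[4];
};

typedef void (*hcall_t)(struct ctx *);

/* Hypothetical function IDs; the real ones index the SMCCC function range. */
enum { FUNC_FOO, FUNC_BAR, NR_FUNCS };

static void handle_foo(struct ctx *c) { c->regs[1] = c->regs[1] + 1; }
static void handle_bar(struct ctx *c) { c->regs[1] = 42; }

/* Sparse table indexed by function ID; unregistered IDs stay NULL. */
static const hcall_t host_hcall[] = {
	[FUNC_FOO] = handle_foo,
	[FUNC_BAR] = handle_bar,
};

static void dispatch(struct ctx *c)
{
	unsigned long id = c->regs[0];

	if (id >= ARRAY_SIZE(host_hcall) || !host_hcall[id]) {
		c->regs[0] = -1UL;	/* SMCCC_RET_NOT_SUPPORTED */
		return;
	}

	c->regs[0] = 0;			/* SMCCC_RET_SUCCESS */
	host_hcall[id](c);		/* handler writes its result into regs[1] */
}

int main(void)
{
	struct ctx c = { .regs = { FUNC_BAR } };

	dispatch(&c);
	printf("ret=%ld val=%lu\n", (long)c.regs[0], c.regs[1]);
	return 0;
}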
diff --git a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
index 88a25fc8fcd3..29305022bc04 100644
--- a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
@@ -33,14 +33,3 @@ void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
__sysreg_restore_user_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
}
-
-void __kvm_enable_ssbs(void)
-{
- u64 tmp;
-
- asm volatile(
- "mrs %0, sctlr_el2\n"
- "orr %0, %0, %1\n"
- "msr sctlr_el2, %0"
- : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
-}
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index e0404bcab019..d61117805de0 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -11,6 +11,7 @@
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
+#include <asm/memory.h>
/*
* The LSB of the HYP VA tag
@@ -201,3 +202,58 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
AARCH64_INSN_BRANCH_NOLINK);
*updptr++ = cpu_to_le32(insn);
}
+
+static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ u32 insn, oinsn, rd;
+
+ BUG_ON(nr_inst != 4);
+
+ /* Compute target register */
+ oinsn = le32_to_cpu(*origptr);
+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+ /* movz rd, #(val & 0xffff) */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)val,
+ 0,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_ZERO);
+ *updptr++ = cpu_to_le32(insn);
+
+ /* movk rd, #((val >> 16) & 0xffff), lsl #16 */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)(val >> 16),
+ 16,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_KEEP);
+ *updptr++ = cpu_to_le32(insn);
+
+ /* movk rd, #((val >> 32) & 0xffff), lsl #32 */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)(val >> 32),
+ 32,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_KEEP);
+ *updptr++ = cpu_to_le32(insn);
+
+ /* movk rd, #((val >> 48) & 0xffff), lsl #48 */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)(val >> 48),
+ 48,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_KEEP);
+ *updptr++ = cpu_to_le32(insn);
+}
+
+void kvm_update_kimg_phys_offset(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
+}
+
+void kvm_get_kimage_voffset(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
+}
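
[Editor's illustration, not part of the commit] generate_mov_q() rewrites the four placeholder instructions into a movz/movk sequence that materialises the chosen 64-bit constant in whatever register the placeholders already name. A small standalone sketch of the same 16-bit-at-a-time splitting, printing the sequence instead of encoding real instructions, with a made-up target register and value:

#include <stdint.h>
#include <stdio.h>

/*
 * Print the movz/movk sequence that loads a 64-bit immediate, 16 bits
 * at a time, mirroring what generate_mov_q() encodes at patch time.
 */
static void print_mov_q(unsigned int rd, uint64_t val)
{
	int shift;

	/* movz zeroes the register and sets bits [15:0]. */
	printf("movz x%u, #0x%04x\n", rd, (unsigned int)(val & 0xffff));

	/* Each movk keeps the other bits and patches one 16-bit field. */
	for (shift = 16; shift < 64; shift += 16)
		printf("movk x%u, #0x%04x, lsl #%d\n",
		       rd, (unsigned int)((val >> shift) & 0xffff), shift);
}

int main(void)
{
	/* Hypothetical value standing in for kimage_voffset + PHYS_OFFSET. */
	print_mov_q(6, 0xffff800011350000ULL);
	return 0;
}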