Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/Kconfig              12
-rw-r--r--  arch/arm/kvm/arm.c               147
-rw-r--r--  arch/arm/kvm/emulate.c            74
-rw-r--r--  arch/arm/kvm/guest.c              12
-rw-r--r--  arch/arm/kvm/handle_exit.c         3
-rw-r--r--  arch/arm/kvm/interrupts.S         14
-rw-r--r--  arch/arm/kvm/interrupts_head.S     6
-rw-r--r--  arch/arm/kvm/mmio.c                8
-rw-r--r--  arch/arm/kvm/mmu.c                37
-rw-r--r--  arch/arm/kvm/psci.c               40
-rw-r--r--  arch/arm/kvm/reset.c               4
-rw-r--r--  arch/arm/kvm/trace.h              10
12 files changed, 238 insertions, 129 deletions
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index bfb915d05665..95a000515e43 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
depends on MMU && OF
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select ARM_GIC
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
@@ -45,15 +46,6 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.
-config KVM_ARM_MAX_VCPUS
- int "Number maximum supported virtual CPUs per VM"
- depends on KVM_ARM_HOST
- default 4
- help
- Static number of max supported virtual CPUs per VM.
-
- If you choose a high number, the vcpu structures will be quite
- large, so only choose a reasonable number that you expect to
- actually use.
+source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index bc738d2b8392..dda1959f0dde 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -44,6 +44,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>
+#include <asm/sections.h>
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
@@ -58,9 +59,12 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u8 kvm_next_vmid;
+static u32 kvm_next_vmid;
+static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock);
+static bool vgic_present;
+
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
@@ -125,13 +129,15 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
goto out_free_stage2_pgd;
+ kvm_vgic_early_init(kvm);
kvm_timer_init(kvm);
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
/* The maximum number of VCPUs is limited by the host's GIC model */
- kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus();
+ kvm->arch.max_vcpus = vgic_present ?
+ kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
return ret;
out_free_stage2_pgd:
@@ -171,6 +177,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
+ r = vgic_present;
+ break;
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
@@ -249,6 +257,7 @@ out:
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
+ kvm_vgic_vcpu_early_init(vcpu);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
@@ -269,6 +278,16 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvm_timer_should_fire(vcpu);
}
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_schedule(vcpu);
+}
+
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_unschedule(vcpu);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
/* Force users to call KVM_ARM_VCPU_INIT */
@@ -278,6 +297,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
+ kvm_arm_reset_debug_ptr(vcpu);
+
return 0;
}
@@ -301,17 +322,10 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_arm_set_running_vcpu(NULL);
}
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
-{
- return -EINVAL;
-}
-
-
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- if (vcpu->arch.pause)
+ if (vcpu->arch.power_off)
mp_state->mp_state = KVM_MP_STATE_STOPPED;
else
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
@@ -324,10 +338,10 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.pause = false;
+ vcpu->arch.power_off = false;
break;
case KVM_MP_STATE_STOPPED:
- vcpu->arch.pause = true;
+ vcpu->arch.power_off = true;
break;
default:
return -EINVAL;
@@ -345,7 +359,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
*/
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
- return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
+ return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
+ && !v->arch.power_off && !v->arch.pause);
}
/* Just ensure a guest exit from a particular CPU */
@@ -425,11 +440,12 @@ static void update_vttbr(struct kvm *kvm)
kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
+ kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
- vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+ vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = pgd_phys | vmid;
spin_unlock(&kvm_vmid_lock);
@@ -449,7 +465,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the first
* time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
+ if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
@@ -471,11 +487,38 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
return vgic_initialized(kvm);
}
-static void vcpu_pause(struct kvm_vcpu *vcpu)
+static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
+static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
+
+static void kvm_arm_halt_guest(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ vcpu->arch.pause = true;
+ force_vm_exit(cpu_all_mask);
+}
+
+static void kvm_arm_resume_guest(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+ vcpu->arch.pause = false;
+ wake_up_interruptible(wq);
+ }
+}
+
+static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
- wait_event_interruptible(*wq, !vcpu->arch.pause);
+ wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ (!vcpu->arch.pause)));
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -525,13 +568,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
update_vttbr(vcpu->kvm);
- if (vcpu->arch.pause)
- vcpu_pause(vcpu);
+ if (vcpu->arch.power_off || vcpu->arch.pause)
+ vcpu_sleep(vcpu);
- kvm_vgic_flush_hwstate(vcpu);
+ /*
+ * Preparing the interrupts to be injected also
+ * involves poking the GIC, which must be done in a
+ * non-preemptible context.
+ */
+ preempt_disable();
kvm_timer_flush_hwstate(vcpu);
+ kvm_vgic_flush_hwstate(vcpu);
- preempt_disable();
local_irq_disable();
/*
@@ -542,14 +590,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->exit_reason = KVM_EXIT_INTR;
}
- if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
+ if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
+ vcpu->arch.power_off || vcpu->arch.pause) {
local_irq_enable();
- preempt_enable();
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
+ preempt_enable();
continue;
}
+ kvm_arm_setup_debug(vcpu);
+
/**************************************************************
* Enter the guest
*/
@@ -560,10 +611,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
+ vcpu->stat.exits++;
/*
* Back from guest
*************************************************************/
+ kvm_arm_clear_debug(vcpu);
+
/*
* We may have taken a host interrupt in HYP mode (ie
* while executing the guest). This interrupt is still
@@ -585,13 +639,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* guest time.
*/
kvm_guest_exit();
- trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
- preempt_enable();
-
+ trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ /*
+ * We must sync the timer state before the vgic state so that
+ * the vgic can properly sample the updated state of the
+ * interrupt line.
+ */
kvm_timer_sync_hwstate(vcpu);
+
kvm_vgic_sync_hwstate(vcpu);
+ preempt_enable();
+
ret = handle_exit(vcpu, run, ret);
}
@@ -753,12 +813,12 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
vcpu_reset_hcr(vcpu);
/*
- * Handle the "start in power-off" case by marking the VCPU as paused.
+ * Handle the "start in power-off" case.
*/
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
- vcpu->arch.pause = true;
+ vcpu->arch.power_off = true;
else
- vcpu->arch.pause = false;
+ vcpu->arch.power_off = false;
return 0;
}
@@ -862,6 +922,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
switch (dev_id) {
case KVM_ARM_DEVICE_VGIC_V2:
+ if (!vgic_present)
+ return -ENXIO;
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default:
return -ENODEV;
@@ -876,6 +938,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
+ if (!vgic_present)
+ return -ENXIO;
return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
}
case KVM_ARM_SET_DEVICE_ADDR: {
@@ -921,6 +985,8 @@ static void cpu_init_hyp_mode(void *dummy)
vector_ptr = (unsigned long)__kvm_hyp_vector;
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+
+ kvm_arm_init_debug();
}
static int hyp_init_cpu_notify(struct notifier_block *self,
@@ -1014,6 +1080,12 @@ static int init_hyp_mode(void)
goto out_free_mappings;
}
+ err = create_hyp_mappings(__start_rodata, __end_rodata);
+ if (err) {
+ kvm_err("Cannot map rodata section\n");
+ goto out_free_mappings;
+ }
+
/*
* Map the Hyp stack pages
*/
@@ -1058,15 +1130,24 @@ static int init_hyp_mode(void)
* Init HYP view of VGIC
*/
err = kvm_vgic_hyp_init();
- if (err)
+ switch (err) {
+ case 0:
+ vgic_present = true;
+ break;
+ case -ENODEV:
+ case -ENXIO:
+ vgic_present = false;
+ break;
+ default:
goto out_free_context;
+ }
/*
* Init HYP architected timer support
*/
err = kvm_timer_hyp_init();
if (err)
- goto out_free_mappings;
+ goto out_free_context;
#ifndef CONFIG_HOTPLUG_CPU
free_boot_hyp_pgd();
@@ -1074,6 +1155,10 @@ static int init_hyp_mode(void)
kvm_perf_init();
+ /* set size of VMID supported by CPU */
+ kvm_vmid_bits = kvm_get_vmid_bits();
+ kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
kvm_info("Hyp mode initialized successfully\n");
return 0;
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d6c005283678..dc99159857b4 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
return vbar;
}
+/*
+ * Switch to an exception mode, updating both CPSR and SPSR. Follow
+ * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
+ */
+static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
+{
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+
+ *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
+
+ switch (mode) {
+ case FIQ_MODE:
+ *vcpu_cpsr(vcpu) |= PSR_F_BIT;
+ /* Fall through */
+ case ABT_MODE:
+ case IRQ_MODE:
+ *vcpu_cpsr(vcpu) |= PSR_A_BIT;
+ /* Fall through */
+ default:
+ *vcpu_cpsr(vcpu) |= PSR_I_BIT;
+ }
+
+ *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+ if (sctlr & SCTLR_TE)
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+ if (sctlr & SCTLR_EE)
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+ /* Note: These now point to the mode banked copies */
+ *vcpu_spsr(vcpu) = cpsr;
+}
+
/**
* kvm_inject_undefined - inject an undefined exception into the guest
* @vcpu: The VCPU to receive the undefined exception
@@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
*/
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
- unsigned long new_lr_value;
- unsigned long new_spsr_value;
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset = 4;
u32 return_offset = (is_thumb) ? 2 : 4;
- new_spsr_value = cpsr;
- new_lr_value = *vcpu_pc(vcpu) - return_offset;
-
- *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
- *vcpu_cpsr(vcpu) |= PSR_I_BIT;
- *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
- if (sctlr & SCTLR_TE)
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
- if (sctlr & SCTLR_EE)
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
- /* Note: These now point to UND banked copies */
- *vcpu_spsr(vcpu) = cpsr;
- *vcpu_reg(vcpu, 14) = new_lr_value;
+ kvm_update_psr(vcpu, UND_MODE);
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
/* Branch to exception vector */
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
*/
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
- unsigned long new_lr_value;
- unsigned long new_spsr_value;
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset;
u32 return_offset = (is_thumb) ? 4 : 0;
bool is_lpae;
- new_spsr_value = cpsr;
- new_lr_value = *vcpu_pc(vcpu) + return_offset;
-
- *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
- *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
- *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
- if (sctlr & SCTLR_TE)
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
- if (sctlr & SCTLR_EE)
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
- /* Note: These now point to ABT banked copies */
- *vcpu_spsr(vcpu) = cpsr;
- *vcpu_reg(vcpu, 14) = new_lr_value;
+ kvm_update_psr(vcpu, ABT_MODE);
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
if (is_pabt)
vect_offset = 12;
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index d503fbb787d3..5fa69d7bae58 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -33,6 +33,12 @@
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
+ VCPU_STAT(hvc_exit_stat),
+ VCPU_STAT(wfe_exit_stat),
+ VCPU_STAT(wfi_exit_stat),
+ VCPU_STAT(mmio_exit_user),
+ VCPU_STAT(mmio_exit_kernel),
+ VCPU_STAT(exits),
{ NULL }
};
@@ -290,3 +296,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
{
return -EINVAL;
}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 95f12b2ccdcb..3ede90d8b20b 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -42,6 +42,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
+ vcpu->stat.hvc_exit_stat++;
ret = kvm_psci_call(vcpu);
if (ret < 0) {
@@ -89,9 +90,11 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
trace_kvm_wfx(*vcpu_pc(vcpu), true);
+ vcpu->stat.wfe_exit_stat++;
kvm_vcpu_on_spin(vcpu);
} else {
trace_kvm_wfx(*vcpu_pc(vcpu), false);
+ vcpu->stat.wfi_exit_stat++;
kvm_vcpu_block(vcpu);
}
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 568494dbbbb5..900ef6dd8f72 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -361,10 +361,6 @@ hyp_hvc:
@ Check syndrome register
mrc p15, 4, r1, c5, c2, 0 @ HSR
lsr r0, r1, #HSR_EC_SHIFT
-#ifdef CONFIG_VFPv3
- cmp r0, #HSR_EC_CP_0_13
- beq switch_to_guest_vfp
-#endif
cmp r0, #HSR_EC_HVC
bne guest_trap @ Not HVC instr.
@@ -378,7 +374,10 @@ hyp_hvc:
cmp r2, #0
bne guest_trap @ Guest called HVC
-host_switch_to_hyp:
+ /*
+ * Getting here means host called HVC, we shift parameters and branch
+ * to Hyp function.
+ */
pop {r0, r1, r2}
/* Check for __hyp_get_vectors */
@@ -409,6 +408,10 @@ guest_trap:
@ Check if we need the fault information
lsr r1, r1, #HSR_EC_SHIFT
+#ifdef CONFIG_VFPv3
+ cmp r1, #HSR_EC_CP_0_13
+ beq switch_to_guest_vfp
+#endif
cmp r1, #HSR_EC_IABT
mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
beq 2f
@@ -477,7 +480,6 @@ guest_trap:
*/
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
- load_vcpu @ Load VCPU pointer to r0
push {r3-r7}
@ NEON/VFP used. Turn on VFP access.
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 702740d37465..51a59504bef4 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -515,8 +515,7 @@ ARM_BE8(rev r6, r6 )
mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- bic r2, #1 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
isb
mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
@@ -529,6 +528,9 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
+ mov r2, #0 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 974b1c606d04..7f33b2056ae6 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
data);
data = vcpu_data_host_to_guest(vcpu, data, len);
- *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+ vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
rt = vcpu->arch.mmio_decode.rt;
if (is_write) {
- data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+ data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+ len);
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
mmio_write_buf(data_buf, len, data);
@@ -209,8 +210,11 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (!ret) {
/* We handled the access successfully in the kernel. */
+ vcpu->stat.mmio_exit_kernel++;
kvm_handle_mmio_return(vcpu, run);
return 1;
+ } else {
+ vcpu->stat.mmio_exit_user++;
}
run->exit_reason = KVM_EXIT_MMIO;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7b4201294187..aba61fd3697a 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
__kvm_flush_dcache_pud(pud);
}
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+ return !pfn_valid(pfn);
+}
+
/**
* stage2_dissolve_pmd() - clear and flush huge PMD entry
* @kvm: pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
kvm_tlb_flush_vmid_ipa(kvm, addr);
/* No need to invalidate the cache for device mappings */
- if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ if (!kvm_is_device_pfn(pte_pfn(old_pte)))
kvm_flush_dcache_pte(old_pte);
put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
- if (!pte_none(*pte) &&
- (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -652,9 +656,9 @@ static void *kvm_alloc_hwpgd(void)
* kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
* @kvm: The KVM struct pointer for the VM.
*
- * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
- * support either full 40-bit input addresses or limited to 32-bit input
- * addresses). Clears the allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
*
* Note we don't need locking here as this is only called when the VM is
* created, which can only be done once.
@@ -988,9 +992,9 @@ out:
return ret;
}
-static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
{
- pfn_t pfn = *pfnp;
+ kvm_pfn_t pfn = *pfnp;
gfn_t gfn = *ipap >> PAGE_SHIFT;
if (PageTransCompound(pfn_to_page(pfn))) {
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
return kvm_vcpu_dabt_iswrite(vcpu);
}
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
- return !pfn_valid(pfn);
-}
-
/**
* stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry
@@ -1202,7 +1201,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
unsigned long size, bool uncached)
{
__coherent_cache_guest_page(vcpu, pfn, size, uncached);
@@ -1219,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
struct vm_area_struct *vma;
- pfn_t pfn;
+ kvm_pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
bool fault_ipa_uncached;
bool logging_active = memslot_is_logging(memslot);
@@ -1347,7 +1346,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
pmd_t *pmd;
pte_t *pte;
- pfn_t pfn;
+ kvm_pfn_t pfn;
bool pfn_valid = false;
trace_kvm_access_fault(fault_ipa);
@@ -1792,8 +1791,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = mem->guest_phys_addr +
(vm_start - mem->userspace_addr);
- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
- vm_start - vma->vm_start;
+ phys_addr_t pa;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 4b94b513168d..a9b3b905e661 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -63,7 +63,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
- vcpu->arch.pause = true;
+ vcpu->arch.power_off = true;
}
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
unsigned long context_id;
phys_addr_t target_pc;
- cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+ cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
@@ -87,15 +87,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
- if (!vcpu->arch.pause) {
+ if (!vcpu->arch.power_off) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
}
- target_pc = *vcpu_reg(source_vcpu, 2);
- context_id = *vcpu_reg(source_vcpu, 3);
+ target_pc = vcpu_get_reg(source_vcpu, 2);
+ context_id = vcpu_get_reg(source_vcpu, 3);
kvm_reset_vcpu(vcpu);
@@ -114,8 +114,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general puspose registers are undefined upon CPU_ON.
*/
- *vcpu_reg(vcpu, 0) = context_id;
- vcpu->arch.pause = false;
+ vcpu_set_reg(vcpu, 0, context_id);
+ vcpu->arch.power_off = false;
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
@@ -126,7 +126,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
- int i;
+ int i, matching_cpus = 0;
unsigned long mpidr;
unsigned long target_affinity;
unsigned long target_affinity_mask;
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
- target_affinity = *vcpu_reg(vcpu, 1);
- lowest_affinity_level = *vcpu_reg(vcpu, 2);
+ target_affinity = vcpu_get_reg(vcpu, 1);
+ lowest_affinity_level = vcpu_get_reg(vcpu, 2);
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -151,12 +151,16 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
*/
kvm_for_each_vcpu(i, tmp, kvm) {
mpidr = kvm_vcpu_get_mpidr_aff(tmp);
- if (((mpidr & target_affinity_mask) == target_affinity) &&
- !tmp->arch.pause) {
- return PSCI_0_2_AFFINITY_LEVEL_ON;
+ if ((mpidr & target_affinity_mask) == target_affinity) {
+ matching_cpus++;
+ if (!tmp->arch.power_off)
+ return PSCI_0_2_AFFINITY_LEVEL_ON;
}
}
+ if (!matching_cpus)
+ return PSCI_RET_INVALID_PARAMS;
+
return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
@@ -175,7 +179,7 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
* re-initialized.
*/
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- tmp->arch.pause = true;
+ tmp->arch.power_off = true;
kvm_vcpu_kick(tmp);
}
@@ -205,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
int ret = 1;
- unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
@@ -269,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
- *vcpu_reg(vcpu, 0) = val;
+ vcpu_set_reg(vcpu, 0, val);
return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
- unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
@@ -291,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
break;
}
- *vcpu_reg(vcpu, 0) = val;
+ vcpu_set_reg(vcpu, 0, val);
return 1;
}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index f558c073c023..eeb85858d6bb 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -77,7 +77,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_reset_coprocs(vcpu);
/* Reset arch_timer context */
- kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
- return 0;
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 0ec35392d208..c25a88598eb0 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -25,21 +25,25 @@ TRACE_EVENT(kvm_entry,
);
TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
- TP_ARGS(exit_reason, vcpu_pc),
+ TP_PROTO(int idx, unsigned int exit_reason, unsigned long vcpu_pc),
+ TP_ARGS(idx, exit_reason, vcpu_pc),
TP_STRUCT__entry(
+ __field( int, idx )
__field( unsigned int, exit_reason )
__field( unsigned long, vcpu_pc )
),
TP_fast_assign(
+ __entry->idx = idx;
__entry->exit_reason = exit_reason;
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
+ TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+ __print_symbolic(__entry->idx, kvm_arm_exception_type),
__entry->exit_reason,
+ __print_symbolic(__entry->exit_reason, kvm_arm_exception_class),
__entry->vcpu_pc)
);