From c550d53934d821dbdd867ca314d417f2e918c72c Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Tue, 11 Oct 2016 23:14:39 +0100
Subject: KVM: MIPS: Remove duplicated ASIDs from vcpu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The kvm_vcpu_arch structure contains mm_structs for allocating MMU
contexts (primarily the ASID), but it also copies the resulting ASIDs
into guest_{user,kernel}_asid[] arrays, which are referenced from uasm
generated code.

This duplication doesn't seem to serve any purpose, and it gets in the
way of generalising the ASID handling across guest kernel/user modes, so
let's just extract the ASID straight out of the mm_struct on demand. In
fact there are convenient cpu_context() and cpu_asid() macros for doing
so.

To reduce the verbosity of this code we also add kern_mm and user_mm
local variables where the kernel and user mm_structs are used.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/kvm/entry.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

(limited to 'arch/mips/kvm/entry.c')

diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index e92fb190e2d6..f81888704caa 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/log2.h>
 #include <asm/msa.h>
 #include <asm/setup.h>
 #include <asm/uasm.h>
@@ -286,23 +287,26 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
 	uasm_i_xori(&p, T0, T0, KSU_USER);
 	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
-	UASM_i_ADDIU(&p, T1, K1,
-		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
+	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
+					  guest_kernel_mm.context.asid));
 	/* else user */
-	UASM_i_ADDIU(&p, T1, K1,
-		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
+	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
+					  guest_user_mm.context.asid));
 	uasm_l_kernel_asid(&l, p);
 
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	/* smp_processor_id */
 	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
-	/* x4 */
-	uasm_i_sll(&p, T2, T2, 2);
+	/* index the ASID array */
+	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
 	UASM_i_ADDU(&p, T3, T1, T2);
-	uasm_i_lw(&p, K0, 0, T3);
+	UASM_i_LW(&p, K0, 0, T3);
 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
-	/* x sizeof(struct cpuinfo_mips)/4 */
-	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
+	/*
+	 * reuse ASID array offset
+	 * cpuinfo_mips is a multiple of sizeof(long)
+	 */
+	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
 	uasm_i_mul(&p, T2, T2, T3);
 
 	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
--
cgit
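[Note: the cpu_context() and cpu_asid() macros relied on above come from
arch/mips/include/asm/mmu_context.h. Simplified (the exact definitions
vary by kernel version and config), they boil down to:

	/* Simplified sketch of the helpers; per-CPU ASIDs live in
	 * mm->context.asid[], an array of unsigned long indexed by CPU. */
	#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
	#define cpu_asid(cpu, mm) \
		(cpu_context(cpu, mm) & cpu_asid_mask(&cpu_data[cpu]))

This is also why the generated code above scales the CPU index by
sizeof(long) rather than the old hard-coded 4: context.asid[] is an
array of unsigned long, not u32.]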
From 7faa6eec6991715d6c1d85c192738dcac405ab89 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Fri, 7 Oct 2016 23:58:53 +0100
Subject: KVM: MIPS/T&E: Activate GVA page tables in guest context
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Activate the GVA page tables when in guest context. This will allow the
normal Linux TLB refill handler to fill from them when guest memory is
read, as well as preventing accidental reads from user memory.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/mmu_context.h |  4 +++-
 arch/mips/kvm/entry.c               | 16 +++++++++++++++-
 arch/mips/kvm/trap_emul.c           | 10 ++++++----
 3 files changed, 24 insertions(+), 6 deletions(-)

(limited to 'arch/mips/kvm/entry.c')

diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index ddd57ade1aa8..16eb8521398e 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -29,9 +29,11 @@ do {								\
 	}							\
 } while (0)
 
+extern void tlbmiss_handler_setup_pgd(unsigned long);
+
+/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)				\
 do {								\
-	extern void tlbmiss_handler_setup_pgd(unsigned long);	\
 	tlbmiss_handler_setup_pgd((unsigned long)(pgd));	\
 	htw_set_pwbase((unsigned long)pgd);			\
 } while (0)
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index f81888704caa..f683d123172c 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -13,6 +13,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/log2.h>
+#include <asm/mmu_context.h>
 #include <asm/msa.h>
 #include <asm/setup.h>
 #include <asm/uasm.h>
@@ -316,7 +317,20 @@ static void *kvm_mips_build_enter_guest(void *addr)
 #else
 	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
 #endif
-	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+
+	/*
+	 * Set up KVM T&E GVA pgd.
+	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
+	 * - but skips write into CP0_PWBase for now
+	 */
+	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
+			  (int)offsetof(struct mm_struct, context.asid), T1);
+
+	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+	uasm_i_jalr(&p, RA, T9);
+	 uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+	uasm_i_ehb(&p);
 
 	/* Disable RDHWR access */
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 183150a963ec..f39d427649dc 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -704,6 +704,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+	struct mm_struct *mm;
 
 	/* Allocate new kernel and user ASIDs if needed */
 
@@ -733,10 +734,9 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	 * on the mode of the Guest (Kernel/User)
 	 */
 	if (current->flags & PF_VCPU) {
-		if (KVM_GUEST_KERNEL_MODE(vcpu))
-			write_c0_entryhi(cpu_asid(cpu, kern_mm));
-		else
-			write_c0_entryhi(cpu_asid(cpu, user_mm));
+		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
+		write_c0_entryhi(cpu_asid(cpu, mm));
+		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
 		kvm_mips_suspend_mm(cpu);
 		ehb();
 	}
@@ -757,6 +757,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 			get_new_mmu_context(current->mm, cpu);
 		}
 		write_c0_entryhi(cpu_asid(cpu, current->mm));
+		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
 		kvm_mips_resume_mm(cpu);
 		ehb();
 	}
@@ -821,6 +822,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 				     asid_version_mask(cpu)))
 			get_new_mmu_context(current->mm, cpu);
 		write_c0_entryhi(cpu_asid(cpu, current->mm));
+		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
 		kvm_mips_resume_mm(cpu);
 		htw_start();
--
cgit
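[Note: taken together, the entry.c hunks make the generated guest-entry
sequence behave roughly like the following C sketch. This is
illustrative only; the real code is uasm-generated, runs with K0/K1
live, and places the CP0_EntryHi write in the jalr delay slot:

	/* Illustrative C equivalent of the generated guest-entry code */
	struct mm_struct *mm = KVM_GUEST_KERNEL_MODE(vcpu) ?
				&vcpu->arch.guest_kernel_mm :
				&vcpu->arch.guest_user_mm;

	tlbmiss_handler_setup_pgd((unsigned long)mm->pgd); /* activate GVA page tables */
	write_c0_entryhi(cpu_asid(cpu, mm));               /* set guest ASID */
	ehb();                                             /* clear execution hazard */
]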
From 29b500b54ef379f1f3227b633dd477a4dd3cd62b Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Fri, 11 Nov 2016 14:08:32 +0000
Subject: KVM: MIPS: Support NetLogic KScratch registers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

tlbex.c uses the implementation-dependent $22 CP0 register group on
NetLogic cores, with the help of the c0_kscratch() helper. Allow these
registers to be allocated by the KVM entry code too, instead of assuming
that KScratch registers are all $31. This will also allow pgd_reg to be
handled, since it is allocated that way.

We also drop the masking of kscratch_mask with 0xfc, as it is redundant
for the standard KScratch registers (Config4.KScrExist won't have the
low 2 bits set anyway) and apparently not necessary for NetLogic.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/kvm/entry.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

(limited to 'arch/mips/kvm/entry.c')

diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index f683d123172c..7424d3d566ff 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -91,6 +91,21 @@ static void *kvm_mips_build_ret_from_exit(void *addr);
 static void *kvm_mips_build_ret_to_guest(void *addr);
 static void *kvm_mips_build_ret_to_host(void *addr);
 
+/*
+ * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
+ * we assume symmetry.
+ */
+static int c0_kscratch(void)
+{
+	switch (boot_cpu_type()) {
+	case CPU_XLP:
+	case CPU_XLR:
+		return 22;
+	default:
+		return 31;
+	}
+}
+
 /**
  * kvm_mips_entry_setup() - Perform global setup for entry code.
  *
@@ -105,18 +120,18 @@ int kvm_mips_entry_setup(void)
 	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
-	unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
+	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;
 
 	/* Pick a scratch register for storing VCPU */
 	if (kscratch_mask) {
-		scratch_vcpu[0] = 31;
+		scratch_vcpu[0] = c0_kscratch();
 		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
 		kscratch_mask &= ~BIT(scratch_vcpu[1]);
 	}
 
 	/* Pick a scratch register to use as a temp for saving state */
 	if (kscratch_mask) {
-		scratch_tmp[0] = 31;
+		scratch_tmp[0] = c0_kscratch();
 		scratch_tmp[1] = ffs(kscratch_mask) - 1;
 		kscratch_mask &= ~BIT(scratch_tmp[1]);
 	}
@@ -132,7 +147,7 @@ static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
 	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
 
 	/* Save the temp scratch register value in cp0_cause of stack frame */
-	if (scratch_tmp[0] == 31) {
+	if (scratch_tmp[0] == c0_kscratch()) {
 		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
 		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
 	}
@@ -148,7 +163,7 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
 	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
 	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
 
-	if (scratch_tmp[0] == 31) {
+	if (scratch_tmp[0] == c0_kscratch()) {
 		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
 		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
 	}
--
cgit
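[Note: in effect, kvm_mips_entry_setup() performs a small first-fit
allocation over the implemented KScratch selects, with the register
number now supplied by c0_kscratch(). A sketch of the logic, mirroring
the patch:

	/* Sketch: first-fit pick of a KScratch select from the CPU's mask */
	unsigned int mask = cpu_data[0].kscratch_mask; /* bit n => select n exists */
	int reg = c0_kscratch();                       /* $22 on XLP/XLR, else $31 */
	int sel;

	if (mask) {
		sel = ffs(mask) - 1;   /* lowest implemented select */
		mask &= ~BIT(sel);     /* reserve it for this user */
	}
]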
From a7cfa7ac1236937dac431845596a39ba27364a00 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Sat, 10 Sep 2016 23:56:46 +0100
Subject: KVM: MIPS: Add fast path TLB refill handler
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use functions from the general MIPS TLB exception vector generation code
(tlbex.c) to construct a fast path TLB refill handler similar to the
general one, but cut down and capable of preserving K0 and K1.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/kvm_host.h |  1 +
 arch/mips/kvm/entry.c            | 78 ++++++++++++++++++++++++++++++++++++++++
 arch/mips/kvm/mips.c             |  8 +++--
 3 files changed, 84 insertions(+), 3 deletions(-)

(limited to 'arch/mips/kvm/entry.c')

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index fea538fc5331..80928ffa0150 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -554,6 +554,7 @@ extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
 
 /* Building of entry/exception code */
 int kvm_mips_entry_setup(void);
 void *kvm_mips_build_vcpu_run(void *addr);
+void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
 void *kvm_mips_build_exception(void *addr, void *handler);
 void *kvm_mips_build_exit(void *addr);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 7424d3d566ff..1ae33e0e675c 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -16,6 +16,7 @@
 #include <asm/mmu_context.h>
 #include <asm/msa.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 #include <asm/uasm.h>
 
 /* Register names */
@@ -122,6 +123,9 @@ int kvm_mips_entry_setup(void)
 	 */
 	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;
 
+	if (pgd_reg != -1)
+		kscratch_mask &= ~BIT(pgd_reg);
+
 	/* Pick a scratch register for storing VCPU */
 	if (kscratch_mask) {
 		scratch_vcpu[0] = c0_kscratch();
@@ -380,6 +384,80 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	return p;
 }
 
+/**
+ * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
+ * @addr:	Address to start writing code.
+ * @handler:	Address of common handler (within range of @addr).
+ *
+ * Assemble TLB refill exception fast path handler for guest execution.
+ *
+ * Returns:	Next address after end of written function.
+ */
+void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
+{
+	u32 *p = addr;
+	struct uasm_label labels[2];
+	struct uasm_reloc relocs[2];
+	struct uasm_label *l = labels;
+	struct uasm_reloc *r = relocs;
+
+	memset(labels, 0, sizeof(labels));
+	memset(relocs, 0, sizeof(relocs));
+
+	/* Save guest k1 into scratch register */
+	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+	/* Get the VCPU pointer from the VCPU scratch register */
+	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+
+	/* Save guest k0 into VCPU structure */
+	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+
+	/*
+	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
+	 * assume symmetry and just disable preemption to silence the warning.
+	 */
+	preempt_disable();
+
+	/*
+	 * Now for the actual refill bit. A lot of this can be common with the
+	 * Linux TLB refill handler, however we don't need to handle so many
+	 * cases. We only need to handle user mode refills, and user mode runs
+	 * with 32-bit addressing.
+	 *
+	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
+	 * that isn't resolved should never actually get taken and is harmless
+	 * to leave in place for now.
+	 */
+
+#ifdef CONFIG_64BIT
+	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+#else
+	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+#endif
+
+	/* we don't support huge pages yet */
+
+	build_get_ptep(&p, K0, K1);
+	build_update_entries(&p, K0, K1);
+	build_tlb_write_entry(&p, &l, &r, tlb_random);
+
+	preempt_enable();
+
+	/* Get the VCPU pointer from the VCPU scratch register again */
+	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+
+	/* Restore the guest's k0/k1 registers */
+	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+	uasm_i_ehb(&p);
+	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+	/* Jump to guest */
+	uasm_i_eret(&p);
+
+	return p;
+}
+
 /**
  * kvm_mips_build_exception() - Assemble first level guest exception handler.
  * @addr:	Address to start writing code.
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7cf85fa1f658..a687864de428 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -264,7 +264,7 @@ static inline void dump_handler(const char *symbol, void *start, void *end)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	int err, size;
-	void *gebase, *p, *handler;
+	void *gebase, *p, *handler, *refill_start, *refill_end;
 	int i;
 
 	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
@@ -317,8 +317,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	/* Build guest exception vectors dynamically in unmapped memory */
 	handler = gebase + 0x2000;
 
-	/* TLB Refill, EXL = 0 */
-	kvm_mips_build_exception(gebase, handler);
+	/* TLB refill */
+	refill_start = gebase;
+	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
 
 	/* General Exception Entry point */
 	kvm_mips_build_exception(gebase + 0x180, handler);
@@ -344,6 +345,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	pr_debug("#include <asm/regdef.h>\n");
 	pr_debug("\n");
 	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
+	dump_handler("kvm_tlb_refill", refill_start, refill_end);
 	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
 	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
--
cgit
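[Note: for orientation, the guest exception base (gebase) layout built
by kvm_arch_vcpu_create() after this change is roughly:

	/*
	 * gebase + 0x0000  TLB refill fast path (EXL = 0)
	 * gebase + 0x0180  general exception entry point
	 * gebase + 0x2000  common exit handler, followed by the vcpu_run code
	 */

The refill handler at gebase must preserve K0/K1 itself, which is why it
stashes them in the KScratch registers allocated by the previous patch
instead of falling through to the common exit handler.]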
From 6a97c775ff77fb7c54adc3f7944205ae66cb5475 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Thu, 23 Apr 2015 16:54:35 +0100
Subject: KVM: MIPS: Use CP0_BadInstr[P] for emulation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When exiting from the guest, store the values of the CP0_BadInstr and
CP0_BadInstrP registers if they exist, which contain the encodings of
the instructions which caused the last synchronous exception.

When the instruction is needed for emulation, kvm_get_badinstr() and
kvm_get_badinstrp() are used instead of calling kvm_get_inst() directly,
to decide whether to read the saved CP0_BadInstr/CP0_BadInstrP registers
(if they exist), or read the instruction from memory (if not).

The use of these registers should be more robust than using
kvm_get_inst(), as it actually gives the instruction encoding seen by
the hardware rather than relying on user accessors after the fact, which
can be fooled by incoherent icache or a racing code modification. It
will also work with VZ, where the guest virtual memory isn't directly
accessible by the host with user accessors.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/kvm_host.h |  4 ++++
 arch/mips/kvm/emulate.c          | 48 +++++++++++++++++++++++++++++++++++++---
 arch/mips/kvm/entry.c            | 14 ++++++++++++
 arch/mips/kvm/mips.c             |  2 +-
 4 files changed, 64 insertions(+), 4 deletions(-)

(limited to 'arch/mips/kvm/entry.c')

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f296ebeda9e3..17c5e53ef3fa 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -280,6 +280,8 @@ struct kvm_vcpu_arch {
 	unsigned long host_cp0_badvaddr;
 	unsigned long host_cp0_epc;
 	u32 host_cp0_cause;
+	u32 host_cp0_badinstr;
+	u32 host_cp0_badinstrp;
 
 	/* GPRS */
 	unsigned long gprs[32];
@@ -641,6 +643,8 @@ void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 /* Emulation */
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
+int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
+int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
 
 /**
  * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b906fc0589f3..b295a4a1496f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -54,7 +54,7 @@ static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
 	}
 
 	/* Read the instruction */
-	err = kvm_get_inst((u32 *)epc, vcpu, &insn.word);
+	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
 	if (err)
 		return err;
 
@@ -258,6 +258,48 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
 	return EMULATE_DONE;
 }
 
+/**
+ * kvm_get_badinstr() - Get bad instruction encoding.
+ * @opc:	Guest pointer to faulting instruction.
+ * @vcpu:	KVM VCPU information.
+ *
+ * Gets the instruction encoding of the faulting instruction, using the saved
+ * BadInstr register value if it exists, otherwise falling back to reading guest
+ * memory at @opc.
+ *
+ * Returns:	The instruction encoding of the faulting instruction.
+ */
+int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+	if (cpu_has_badinstr) {
+		*out = vcpu->arch.host_cp0_badinstr;
+		return 0;
+	} else {
+		return kvm_get_inst(opc, vcpu, out);
+	}
+}
+
+/**
+ * kvm_get_badinstrp() - Get bad prior instruction encoding.
+ * @opc:	Guest pointer to prior faulting instruction.
+ * @vcpu:	KVM VCPU information.
+ *
+ * Gets the instruction encoding of the prior faulting instruction (the branch
+ * containing the delay slot which faulted), using the saved BadInstrP register
+ * value if it exists, otherwise falling back to reading guest memory at @opc.
+ *
+ * Returns:	The instruction encoding of the prior faulting instruction.
+ */
+int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+	if (cpu_has_badinstrp) {
+		*out = vcpu->arch.host_cp0_badinstrp;
+		return 0;
+	} else {
+		return kvm_get_inst(opc, vcpu, out);
+	}
+}
+
 /**
  * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
  * @vcpu:	Virtual CPU.
@@ -1839,7 +1881,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
 	/* Fetch the instruction. */
 	if (cause & CAUSEF_BD)
 		opc += 1;
-	err = kvm_get_inst(opc, vcpu, &inst.word);
+	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err)
 		return EMULATE_FAIL;
 
@@ -2434,7 +2476,7 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
 	/* Fetch the instruction. */
 	if (cause & CAUSEF_BD)
 		opc += 1;
-	err = kvm_get_inst(opc, vcpu, &inst.word);
+	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err) {
 		kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
 		return EMULATE_FAIL;
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 1ae33e0e675c..c5b254c4d0da 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -53,6 +53,8 @@
 /* Some CP0 registers */
 #define C0_HWRENA	7, 0
 #define C0_BADVADDR	8, 0
+#define C0_BADINSTR	8, 1
+#define C0_BADINSTRP	8, 2
 #define C0_ENTRYHI	10, 0
 #define C0_STATUS	12, 0
 #define C0_CAUSE	13, 0
@@ -579,6 +581,18 @@ void *kvm_mips_build_exit(void *addr)
 	uasm_i_mfc0(&p, K0, C0_CAUSE);
 	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
 
+	if (cpu_has_badinstr) {
+		uasm_i_mfc0(&p, K0, C0_BADINSTR);
+		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
+					   host_cp0_badinstr), K1);
+	}
+
+	if (cpu_has_badinstrp) {
+		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
+		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
+					   host_cp0_badinstrp), K1);
+	}
+
 	/* Now restore the host state just enough to run the handlers */
 
 	/* Switch EBASE to the one used by Linux */
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 29afd96069ef..b8f04070bf39 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1471,7 +1471,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		if (cause & CAUSEF_BD)
 			opc += 1;
 		inst = 0;
-		kvm_get_inst(opc, vcpu, &inst);
+		kvm_get_badinstr(opc, vcpu, &inst);
 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
 			exccode, opc, inst, badvaddr,
 			kvm_read_c0_guest_status(vcpu->arch.cop0));
--
cgit
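[Note: taken together, the emulation fetch path now prefers the
hardware-captured encodings. Roughly (a sketch, not literal kernel
code):

	/* Sketch: obtaining the faulting instruction for emulation */
	u32 inst;

	if (cause & CAUSEF_BD)
		opc += 1;                       /* fault was in a branch delay slot */
	kvm_get_badinstr(opc, vcpu, &inst);     /* CP0_BadInstr if implemented,
						   else read guest memory at opc */

kvm_get_badinstrp() is the analogous helper for the branch itself:
kvm_compute_return_epc() uses it because it needs the prior (branch)
instruction, captured in CP0_BadInstrP, rather than the one in the delay
slot.]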