author | Linus Torvalds <[email protected]> | 2017-05-08 12:37:56 -0700
---|---|---
committer | Linus Torvalds <[email protected]> | 2017-05-08 12:37:56 -0700
commit | 2d3e4866dea96b0506395b47bfefb234f2088dac (patch) |
tree | d5c7bd97d222bef46f9d73adee8c79dbdb9f82f4 /arch/powerpc/kvm/emulate_loadstore.c |
parent | 9c6ee01ed5bb1ee489d580eaa60d7eb5a8ede336 (diff) |
parent | 2e5b0bd9cc6172edef502dfae28ae790f74a882e (diff) |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"ARM:
- HYP mode stub supports kexec/kdump on 32-bit
- improved PMU support
- virtual interrupt controller performance improvements
- support for userspace virtual interrupt controller (slower, but
necessary for KVM on the weird Broadcom SoCs used by the Raspberry
Pi 3)
MIPS:
- basic support for hardware virtualization (ImgTec P5600/P6600/I6400
and Cavium Octeon III)
PPC:
- in-kernel acceleration for VFIO
s390:
- support for guests without storage keys
- adapter interruption suppression
x86:
- usual range of nVMX improvements, notably nested EPT support for
accessed and dirty bits
- emulation of CPL3 CPUID faulting
generic:
- first part of VCPU thread request API
- kvm_stat improvements"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (227 commits)
kvm: nVMX: Don't validate disabled secondary controls
KVM: put back #ifndef CONFIG_S390 around kvm_vcpu_kick
Revert "KVM: Support vCPU-based gfn->hva cache"
tools/kvm: fix top level makefile
KVM: x86: don't hold kvm->lock in KVM_SET_GSI_ROUTING
KVM: Documentation: remove VM mmap documentation
kvm: nVMX: Remove superfluous VMX instruction fault checks
KVM: x86: fix emulation of RSM and IRET instructions
KVM: mark requests that need synchronization
KVM: return if kvm_vcpu_wake_up() did wake up the VCPU
KVM: add explicit barrier to kvm_vcpu_kick
KVM: perform a wake_up in kvm_make_all_cpus_request
KVM: mark requests that do not need a wakeup
KVM: remove #ifndef CONFIG_S390 around kvm_vcpu_wake_up
KVM: x86: always use kvm_make_request instead of set_bit
KVM: add kvm_{test,clear}_request to replace {test,clear}_bit
s390: kvm: Cpu model support for msa6, msa7 and msa8
KVM: x86: remove irq disablement around KVM_SET_CLOCK/KVM_GET_CLOCK
kvm: better MWAIT emulation for guests
KVM: x86: virtualize cpuid faulting
...
Diffstat (limited to 'arch/powerpc/kvm/emulate_loadstore.c')
-rw-r--r-- | arch/powerpc/kvm/emulate_loadstore.c | 472
1 file changed, 431 insertions, 41 deletions
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 6d3c0ee1d744..af833531af31 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -34,18 +34,38 @@
 #include "timing.h"
 #include "trace.h"
 
-/* XXX to do:
- * lhax
- * lhaux
- * lswx
- * lswi
- * stswx
- * stswi
- * lha
- * lhau
- * lmw
- * stmw
+#ifdef CONFIG_PPC_FPU
+static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
+{
+    if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
+        kvmppc_core_queue_fpunavail(vcpu);
+        return true;
+    }
+
+    return false;
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_VSX
+static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
+{
+    if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
+        kvmppc_core_queue_vsx_unavail(vcpu);
+        return true;
+    }
+
+    return false;
+}
+#endif /* CONFIG_VSX */
+
+/*
+ * XXX to do:
+ * lfiwax, lfiwzx
+ * vector loads and stores
  *
+ * Instructions that trap when used on cache-inhibited mappings
+ * are not emulated here: multiple and string instructions,
+ * lq/stq, and the load-reserve/store-conditional instructions.
  */
 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 {
@@ -66,6 +86,19 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
     rs = get_rs(inst);
     rt = get_rt(inst);
 
+    /*
+     * if mmio_vsx_tx_sx_enabled == 0, copy data between
+     * VSR[0..31] and memory
+     * if mmio_vsx_tx_sx_enabled == 1, copy data between
+     * VSR[32..63] and memory
+     */
+    vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
+    vcpu->arch.mmio_vsx_copy_nums = 0;
+    vcpu->arch.mmio_vsx_offset = 0;
+    vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
+    vcpu->arch.mmio_sp64_extend = 0;
+    vcpu->arch.mmio_sign_extend = 0;
+
     switch (get_op(inst)) {
     case 31:
         switch (get_xop(inst)) {
@@ -73,6 +106,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
             emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
             break;
 
+        case OP_31_XOP_LWZUX:
+            emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
         case OP_31_XOP_LBZX:
             emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
             break;
@@ -82,22 +120,36 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
             kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
             break;
 
+        case OP_31_XOP_STDX:
+            emulated = kvmppc_handle_store(run, vcpu,
+                    kvmppc_get_gpr(vcpu, rs), 8, 1);
+            break;
+
+        case OP_31_XOP_STDUX:
+            emulated = kvmppc_handle_store(run, vcpu,
+                    kvmppc_get_gpr(vcpu, rs), 8, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
         case OP_31_XOP_STWX:
             emulated = kvmppc_handle_store(run, vcpu,
-                    kvmppc_get_gpr(vcpu, rs),
-                    4, 1);
+                    kvmppc_get_gpr(vcpu, rs), 4, 1);
+            break;
+
+        case OP_31_XOP_STWUX:
+            emulated = kvmppc_handle_store(run, vcpu,
+                    kvmppc_get_gpr(vcpu, rs), 4, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
             break;
 
         case OP_31_XOP_STBX:
             emulated = kvmppc_handle_store(run, vcpu,
-                    kvmppc_get_gpr(vcpu, rs),
-                    1, 1);
+                    kvmppc_get_gpr(vcpu, rs), 1, 1);
             break;
 
         case OP_31_XOP_STBUX:
             emulated = kvmppc_handle_store(run, vcpu,
-                    kvmppc_get_gpr(vcpu, rs),
-                    1, 1);
+                    kvmppc_get_gpr(vcpu, rs), 1, 1);
             kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
             break;
 
@@ -105,6 +157,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
             emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
             break;
 
+        case OP_31_XOP_LHAUX:
+            emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
         case OP_31_XOP_LHZX:
             emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
             break;
@@ -116,14 +173,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
         case OP_31_XOP_STHX:
             emulated = kvmppc_handle_store(run, vcpu,
-                    kvmppc_get_gpr(vcpu, rs),
-                    2, 1);
+                    kvmppc_get_gpr(vcpu, rs), 2, 1);
             break;
 
         case OP_31_XOP_STHUX:
             emulated = kvmppc_handle_store(run, vcpu,
-                    kvmppc_get_gpr(vcpu, rs),
-                    2, 1);
+                    kvmppc_get_gpr(vcpu, rs), 2, 1);
             kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
             break;
 
@@ -143,8 +198,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
         case OP_31_XOP_STWBRX:
             emulated = kvmppc_handle_store(run, vcpu,
-                    kvmppc_get_gpr(vcpu, rs),
-                    4, 0);
+                    kvmppc_get_gpr(vcpu, rs), 4, 0);
             break;
 
         case OP_31_XOP_LHBRX:
@@ -153,10 +207,258 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
         case OP_31_XOP_STHBRX:
             emulated = kvmppc_handle_store(run, vcpu,
-                    kvmppc_get_gpr(vcpu, rs),
-                    2, 0);
+                    kvmppc_get_gpr(vcpu, rs), 2, 0);
+            break;
+
+        case OP_31_XOP_LDBRX:
+            emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
+            break;
+
+        case OP_31_XOP_STDBRX:
+            emulated = kvmppc_handle_store(run, vcpu,
+                    kvmppc_get_gpr(vcpu, rs), 8, 0);
+            break;
+
+        case OP_31_XOP_LDX:
+            emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+            break;
+
+        case OP_31_XOP_LDUX:
+            emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
+        case OP_31_XOP_LWAX:
+            emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+            break;
+
+        case OP_31_XOP_LWAUX:
+            emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
+#ifdef CONFIG_PPC_FPU
+        case OP_31_XOP_LFSX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_sp64_extend = 1;
+            emulated = kvmppc_handle_load(run, vcpu,
+                    KVM_MMIO_REG_FPR|rt, 4, 1);
+            break;
+
+        case OP_31_XOP_LFSUX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_sp64_extend = 1;
+            emulated = kvmppc_handle_load(run, vcpu,
+                    KVM_MMIO_REG_FPR|rt, 4, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
+        case OP_31_XOP_LFDX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            emulated = kvmppc_handle_load(run, vcpu,
+                    KVM_MMIO_REG_FPR|rt, 8, 1);
+            break;
+
+        case OP_31_XOP_LFDUX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            emulated = kvmppc_handle_load(run, vcpu,
+                    KVM_MMIO_REG_FPR|rt, 8, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
+        case OP_31_XOP_LFIWAX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            emulated = kvmppc_handle_loads(run, vcpu,
+                    KVM_MMIO_REG_FPR|rt, 4, 1);
+            break;
+
+        case OP_31_XOP_LFIWZX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            emulated = kvmppc_handle_load(run, vcpu,
+                    KVM_MMIO_REG_FPR|rt, 4, 1);
+            break;
+
+        case OP_31_XOP_STFSX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_sp64_extend = 1;
+            emulated = kvmppc_handle_store(run, vcpu,
+                    VCPU_FPR(vcpu, rs), 4, 1);
+            break;
+
+        case OP_31_XOP_STFSUX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_sp64_extend = 1;
+            emulated = kvmppc_handle_store(run, vcpu,
+                    VCPU_FPR(vcpu, rs), 4, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
+        case OP_31_XOP_STFDX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            emulated = kvmppc_handle_store(run, vcpu,
+                    VCPU_FPR(vcpu, rs), 8, 1);
+            break;
+
+        case OP_31_XOP_STFDUX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            emulated = kvmppc_handle_store(run, vcpu,
+                    VCPU_FPR(vcpu, rs), 8, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+
+        case OP_31_XOP_STFIWX:
+            if (kvmppc_check_fp_disabled(vcpu))
+                return EMULATE_DONE;
+            emulated = kvmppc_handle_store(run, vcpu,
+                    VCPU_FPR(vcpu, rs), 4, 1);
+            break;
+#endif
+
+#ifdef CONFIG_VSX
+        case OP_31_XOP_LXSDX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            emulated = kvmppc_handle_vsx_load(run, vcpu,
+                    KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+            break;
+
+        case OP_31_XOP_LXSSPX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            vcpu->arch.mmio_sp64_extend = 1;
+            emulated = kvmppc_handle_vsx_load(run, vcpu,
+                    KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+            break;
+
+        case OP_31_XOP_LXSIWAX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            emulated = kvmppc_handle_vsx_load(run, vcpu,
+                    KVM_MMIO_REG_VSX|rt, 4, 1, 1);
+            break;
+
+        case OP_31_XOP_LXSIWZX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            emulated = kvmppc_handle_vsx_load(run, vcpu,
+                    KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+            break;
+
+        case OP_31_XOP_LXVD2X:
+            /*
+             * The emulated MMIO load/store works in three steps:
+             *
+             * Step 1: exit from the VM via the page-fault ISR; KVM then
+             * saves the VSRs (see guest_exit_cont->store_fp_state->
+             * SAVE_32VSRS for reference).
+             *
+             * Step 2: copy the data between memory and the VCPU.
+             * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use
+             * 2 copies * 8 bytes or 4 copies * 4 bytes to emulate one
+             * 16-byte copy. There is also an endianness issue, so mind
+             * the memory layout (see the LXVD2X_ROT/STXVD2X_ROT macros
+             * for reference). If the host is little-endian, KVM calls
+             * XXSWAPD for LXVD2X_ROT/STXVD2X_ROT, so the positions in
+             * memory have to be swapped.
+             *
+             * Step 3: return to the guest; KVM restores the registers
+             * (see kvmppc_hv_entry->load_fp_state->REST_32VSRS for
+             * reference).
+             */
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 2;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            emulated = kvmppc_handle_vsx_load(run, vcpu,
+                    KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+            break;
+
+        case OP_31_XOP_LXVW4X:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 4;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+            emulated = kvmppc_handle_vsx_load(run, vcpu,
+                    KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+            break;
+
+        case OP_31_XOP_LXVDSX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type =
+                    KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
+            emulated = kvmppc_handle_vsx_load(run, vcpu,
+                    KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+            break;
+
+        case OP_31_XOP_STXSDX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            emulated = kvmppc_handle_vsx_store(run, vcpu,
+                    rs, 8, 1);
             break;
 
+        case OP_31_XOP_STXSSPX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            vcpu->arch.mmio_sp64_extend = 1;
+            emulated = kvmppc_handle_vsx_store(run, vcpu,
+                    rs, 4, 1);
+            break;
+
+        case OP_31_XOP_STXSIWX:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_offset = 1;
+            vcpu->arch.mmio_vsx_copy_nums = 1;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+            emulated = kvmppc_handle_vsx_store(run, vcpu,
+                    rs, 4, 1);
+            break;
+
+        case OP_31_XOP_STXVD2X:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 2;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+            emulated = kvmppc_handle_vsx_store(run, vcpu,
+                    rs, 8, 1);
+            break;
+
+        case OP_31_XOP_STXVW4X:
+            if (kvmppc_check_vsx_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.mmio_vsx_copy_nums = 4;
+            vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+            emulated = kvmppc_handle_vsx_store(run, vcpu,
+                    rs, 4, 1);
+            break;
+#endif /* CONFIG_VSX */
 
         default:
             emulated = EMULATE_FAIL;
             break;
@@ -167,10 +469,60 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
         emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
         break;
 
-    /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+#ifdef CONFIG_PPC_FPU
+    case OP_STFS:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        vcpu->arch.mmio_sp64_extend = 1;
+        emulated = kvmppc_handle_store(run, vcpu,
+                VCPU_FPR(vcpu, rs),
+                4, 1);
+        break;
+
+    case OP_STFSU:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        vcpu->arch.mmio_sp64_extend = 1;
+        emulated = kvmppc_handle_store(run, vcpu,
+                VCPU_FPR(vcpu, rs),
+                4, 1);
+        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+        break;
+
+    case OP_STFD:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        emulated = kvmppc_handle_store(run, vcpu,
+                VCPU_FPR(vcpu, rs),
+                8, 1);
+        break;
+
+    case OP_STFDU:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        emulated = kvmppc_handle_store(run, vcpu,
+                VCPU_FPR(vcpu, rs),
+                8, 1);
+        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+        break;
+#endif
+
     case OP_LD:
         rt = get_rt(inst);
-        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+        switch (inst & 3) {
+        case 0: /* ld */
+            emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+            break;
+        case 1: /* ldu */
+            emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+        case 2: /* lwa */
+            emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+            break;
+        default:
+            emulated = EMULATE_FAIL;
+        }
         break;
 
     case OP_LWZU:
@@ -193,31 +545,37 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
                 4, 1);
         break;
 
-    /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
     case OP_STD:
         rs = get_rs(inst);
-        emulated = kvmppc_handle_store(run, vcpu,
-                kvmppc_get_gpr(vcpu, rs),
-                8, 1);
+        switch (inst & 3) {
+        case 0: /* std */
+            emulated = kvmppc_handle_store(run, vcpu,
+                    kvmppc_get_gpr(vcpu, rs), 8, 1);
+            break;
+        case 1: /* stdu */
+            emulated = kvmppc_handle_store(run, vcpu,
+                    kvmppc_get_gpr(vcpu, rs), 8, 1);
+            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+            break;
+        default:
+            emulated = EMULATE_FAIL;
+        }
         break;
 
     case OP_STWU:
         emulated = kvmppc_handle_store(run, vcpu,
-                kvmppc_get_gpr(vcpu, rs),
-                4, 1);
+                kvmppc_get_gpr(vcpu, rs), 4, 1);
         kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
         break;
 
     case OP_STB:
         emulated = kvmppc_handle_store(run, vcpu,
-                kvmppc_get_gpr(vcpu, rs),
-                1, 1);
+                kvmppc_get_gpr(vcpu, rs), 1, 1);
         break;
 
     case OP_STBU:
         emulated = kvmppc_handle_store(run, vcpu,
-                kvmppc_get_gpr(vcpu, rs),
-                1, 1);
+                kvmppc_get_gpr(vcpu, rs), 1, 1);
         kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
         break;
 
@@ -241,16 +599,48 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
     case OP_STH:
         emulated = kvmppc_handle_store(run, vcpu,
-                kvmppc_get_gpr(vcpu, rs),
-                2, 1);
+                kvmppc_get_gpr(vcpu, rs), 2, 1);
         break;
 
     case OP_STHU:
         emulated = kvmppc_handle_store(run, vcpu,
-                kvmppc_get_gpr(vcpu, rs),
-                2, 1);
+                kvmppc_get_gpr(vcpu, rs), 2, 1);
+        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+        break;
+
+#ifdef CONFIG_PPC_FPU
+    case OP_LFS:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        vcpu->arch.mmio_sp64_extend = 1;
+        emulated = kvmppc_handle_load(run, vcpu,
+                KVM_MMIO_REG_FPR|rt, 4, 1);
+        break;
+
+    case OP_LFSU:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        vcpu->arch.mmio_sp64_extend = 1;
+        emulated = kvmppc_handle_load(run, vcpu,
+                KVM_MMIO_REG_FPR|rt, 4, 1);
+        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+        break;
+
+    case OP_LFD:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        emulated = kvmppc_handle_load(run, vcpu,
+                KVM_MMIO_REG_FPR|rt, 8, 1);
+        break;
+
+    case OP_LFDU:
+        if (kvmppc_check_fp_disabled(vcpu))
+            return EMULATE_DONE;
+        emulated = kvmppc_handle_load(run, vcpu,
+                KVM_MMIO_REG_FPR|rt, 8, 1);
         kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
         break;
+#endif
 
     default:
         emulated = EMULATE_FAIL;
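
A note on the new switch (inst & 3) blocks in the OP_LD and OP_STD handlers above: DS-form Power instructions (the ld/ldu/lwa family and std/stdu) reuse the two low-order bits of the instruction word as a sub-opcode, which is why a single major opcode now fans out into several emulated variants. The standalone sketch below only illustrates that decode; the enum, helper, and example encoding are hypothetical and are not kernel identifiers.

/* ds_form_decode.c - illustration of the DS-form sub-opcode decode (hypothetical names) */
#include <stdint.h>
#include <stdio.h>

enum ds_subop { DS_LD = 0, DS_LDU = 1, DS_LWA = 2, DS_RESERVED = 3 };

/* The two low-order bits of a DS-form instruction select the variant. */
static enum ds_subop decode_ds_subop(uint32_t inst)
{
    return (enum ds_subop)(inst & 3);
}

int main(void)
{
    uint32_t inst = 0xe8830010;    /* encodes "ld r4, 16(r3)"; low bits are 00 */

    switch (decode_ds_subop(inst)) {
    case DS_LD:
        printf("ld: 8-byte load, no update of rA\n");
        break;
    case DS_LDU:
        printf("ldu: 8-byte load, then rA = effective address\n");
        break;
    case DS_LWA:
        printf("lwa: 4-byte load, sign-extended to 64 bits\n");
        break;
    default:
        printf("reserved encoding: emulation fails\n");
        break;
    }
    return 0;
}

This mirrors the emulation logic in the diff: encodings 0, 1, and 2 map to kvmppc_handle_load()/kvmppc_handle_loads() with an optional update of rA, and anything else falls through to EMULATE_FAIL; for stores only encodings 0 and 1 are defined, distinguishing std from stdu.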