Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s.h')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 196
1 file changed, 163 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d62e703f1214..98da010252a3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -24,20 +24,6 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_book3s_asm.h>
 
-struct kvmppc_slb {
-	u64 esid;
-	u64 vsid;
-	u64 orige;
-	u64 origv;
-	bool valid	: 1;
-	bool Ks		: 1;
-	bool Kp		: 1;
-	bool nx		: 1;
-	bool large	: 1;	/* PTEs are 16MB */
-	bool tb		: 1;	/* 1TB segment */
-	bool class	: 1;
-};
-
 struct kvmppc_bat {
 	u64 raw;
 	u32 bepi;
@@ -67,11 +53,22 @@ struct kvmppc_sid_map {
 #define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
 #endif
 
+struct hpte_cache {
+	struct hlist_node list_pte;
+	struct hlist_node list_pte_long;
+	struct hlist_node list_vpte;
+	struct hlist_node list_vpte_long;
+	struct rcu_head rcu_head;
+	u64 host_va;
+	u64 pfn;
+	ulong slot;
+	struct kvmppc_pte pte;
+};
+
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
 	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
-	struct kvmppc_slb slb[64];
 	struct {
 		u64 esid;
 		u64 vsid;
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s {
 	struct kvmppc_bat dbat[8];
 	u64 hid[6];
 	u64 gqr[8];
-	int slb_nr;
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
@@ -93,7 +89,13 @@ struct kvmppc_vcpu_book3s {
 	u64 vsid_max;
 #endif
 	int context_id[SID_CONTEXTS];
-	ulong prog_flags; /* flags to inject when giving a 700 trap */
+
+	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+	int hpte_cache_count;
+	spinlock_t mmu_lock;
 };
 
 #define CONTEXT_HOST		0
@@ -110,8 +112,10 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -123,19 +127,22 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern int kvmppc_mmu_hpte_sysinit(void);
 extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-extern ulong kvmppc_trampoline_lowmem;
-extern ulong kvmppc_trampoline_enter;
+extern void kvmppc_handler_lowmem_trampoline(void);
+extern void kvmppc_handler_trampoline_enter(void);
 extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_hv_entry_trampoline(void);
 extern void kvmppc_load_up_fpu(void);
 extern void kvmppc_load_up_altivec(void);
 extern void kvmppc_load_up_vsx(void);
@@ -147,15 +154,32 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
 }
 
-static inline ulong dsisr(void)
+extern void kvm_return_point(void);
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
-	ulong r;
-	asm ( "mfdsisr %0 " : "=r" (r) );
-	return r;
+	return to_book3s(vcpu)->hior;
 }
 
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	if (pending_now)
+		vcpu->arch.shared->int_pending = 1;
+	else if (old_pending)
+		vcpu->arch.shared->int_pending = 0;
+}
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
@@ -244,6 +268,120 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return to_svcpu(vcpu)->fault_dar;
 }
 
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	ulong crit_raw = vcpu->arch.shared->critical;
+	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+	bool crit;
+
+	/* Truncate crit indicators in 32 bit mode */
+	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+		crit_raw &= 0xffffffff;
+		crit_r1 &= 0xffffffff;
+	}
+
+	/* Critical section when crit == r1 */
+	crit = (crit_raw == crit_r1);
+	/* ... and we're in supervisor mode */
+	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+	return crit;
+}
+#else /* CONFIG_KVM_BOOK3S_PR */
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+}
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu);
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+	return vcpu->arch.last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault_dar;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+#endif
+
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
@@ -251,12 +389,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 
 #define INS_DCBZ			0x7c0007ec
 
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
-
 #endif /* __ASM_KVM_BOOK3S_H__ */
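
Note on the new hpte_cache bookkeeping: each shadow HPTE now hangs on four hash lists (by effective address, long effective-address group, virtual page and long virtual page), guarded by the per-vcpu mmu_lock and apparently freed via RCU (note the rcu_head), so the kvmppc_mmu_pte_flush()/_vflush()/_pflush() paths can find candidates without scanning every entry. A minimal sketch of an insertion is below; the index calculations and the example_* function name are assumptions for illustration (they presume power-of-two table sizes), not the kernel's actual hash helpers, which live in the MMU .c files rather than in this header.

/* Illustrative sketch only -- not the kernel's actual hash functions. */
static void example_hpte_cache_insert(struct kvmppc_vcpu_book3s *vcpu3s,
				      struct hpte_cache *pte)
{
	unsigned int i;

	spin_lock(&vcpu3s->mmu_lock);

	/* Bucket by the guest effective address of the mapping */
	i = (pte->pte.eaddr >> PAGE_SHIFT) & (HPTEG_HASH_NUM_PTE - 1);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[i]);

	/* Bucket by the guest virtual page number for the vflush path */
	i = pte->pte.vpage & (HPTEG_HASH_NUM_VPTE - 1);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[i]);

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

The two *_long lists would be populated the same way, just with coarser keys, so a flush over a large range only has to walk the buckets that can contain matches.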
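Note on the CONFIG_KVM_BOOK3S_PR split: the PR variants of the small helpers read guest-visible state (kvmppc_interrupt_offset() returns the guest's hior, and kvmppc_critical_section() reports whether shared->critical matches r1 while MSR_PR is clear), whereas the non-PR variants return 0 and false and access registers in vcpu->arch directly. A hypothetical caller combining the two helpers might look like the sketch below; the function name and the deferral policy are assumptions for illustration only, the real delivery logic lives in kvmppc_inject_interrupt() and the interrupt code, which are not part of this header.

/* Illustrative sketch only -- not the kernel's delivery code. */
static void example_deliver_vector(struct kvm_vcpu *vcpu, int vec)
{
	/* Hold off while the guest advertises a critical section */
	if (kvmppc_critical_section(vcpu))
		return;

	/* Vector addresses are relocated by HIOR on PR guests only */
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
}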