diff options
Diffstat (limited to 'arch/powerpc/kernel/process.c')
| -rw-r--r-- | arch/powerpc/kernel/process.c | 58 | 
1 file changed, 48 insertions, 10 deletions
| diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 406d7ee9e322..984813a4d5dc 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -628,7 +628,7 @@ static void do_break_handler(struct pt_regs *regs)  {  	struct arch_hw_breakpoint null_brk = {0};  	struct arch_hw_breakpoint *info; -	struct ppc_inst instr = ppc_inst(0); +	ppc_inst_t instr = ppc_inst(0);  	int type = 0;  	int size = 0;  	unsigned long ea; @@ -1156,6 +1156,40 @@ static inline void save_sprs(struct thread_struct *t)  #endif  } +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +void kvmppc_save_user_regs(void) +{ +	unsigned long usermsr; + +	if (!current->thread.regs) +		return; + +	usermsr = current->thread.regs->msr; + +	if (usermsr & MSR_FP) +		save_fpu(current); + +	if (usermsr & MSR_VEC) +		save_altivec(current); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +	if (usermsr & MSR_TM) { +		current->thread.tm_tfhar = mfspr(SPRN_TFHAR); +		current->thread.tm_tfiar = mfspr(SPRN_TFIAR); +		current->thread.tm_texasr = mfspr(SPRN_TEXASR); +		current->thread.regs->msr &= ~MSR_TM; +	} +#endif +} +EXPORT_SYMBOL_GPL(kvmppc_save_user_regs); + +void kvmppc_save_current_sprs(void) +{ +	save_sprs(&current->thread); +} +EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs); +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ +  static inline void restore_sprs(struct thread_struct *old_thread,  				struct thread_struct *new_thread)  { @@ -1206,7 +1240,7 @@ struct task_struct *__switch_to(struct task_struct *prev,  {  	struct thread_struct *new_thread, *old_thread;  	struct task_struct *last; -#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_64S_HASH_MMU  	struct ppc64_tlb_batch *batch;  #endif @@ -1215,7 +1249,7 @@ struct task_struct *__switch_to(struct task_struct *prev,  	WARN_ON(!irqs_disabled()); -#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_64S_HASH_MMU  	batch = this_cpu_ptr(&ppc64_tlb_batch);  	if (batch->active) {  		current_thread_info()->local_flags |= _TLF_LAZY_MMU; @@ -1281,9 +1315,9 @@ 
struct task_struct *__switch_to(struct task_struct *prev,  	set_return_regs_changed(); /* _switch changes stack (and regs) */ -#ifdef CONFIG_PPC32 -	kuap_assert_locked(); -#endif +	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64)) +		kuap_assert_locked(); +  	last = _switch(old_thread, new_thread);  	/* @@ -1294,6 +1328,7 @@ struct task_struct *__switch_to(struct task_struct *prev,  	 */  #ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_64S_HASH_MMU  	/*  	 * This applies to a process that was context switched while inside  	 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was @@ -1305,6 +1340,7 @@ struct task_struct *__switch_to(struct task_struct *prev,  		batch = this_cpu_ptr(&ppc64_tlb_batch);  		batch->active = 1;  	} +#endif  	/*  	 * Math facilities are masked out of the child MSR in copy_thread. @@ -1655,7 +1691,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)  static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)  { -#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_64S_HASH_MMU  	unsigned long sp_vsid;  	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; @@ -1767,6 +1803,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,  #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)  	p->thread.kuap = KUAP_NONE;  #endif +#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) +	p->thread.pid = MMU_NO_CONTEXT; +#endif  	setup_ksp_vsid(p, sp); @@ -2299,10 +2338,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)  	 * the heap, we can put it above 1TB so it is backed by a 1TB  	 * segment. Otherwise the heap will be in the bottom 1TB  	 * which always uses 256MB segments and this may result in a -	 * performance penalty. We don't need to worry about radix. For -	 * radix, mmu_highuser_ssize remains unchanged from 256MB. +	 * performance penalty.  	 
*/ -	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) +	if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))  		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);  #endif |