Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- | arch/powerpc/kernel/asm-offsets.c | 772
-rw-r--r-- | arch/powerpc/kernel/cpu_setup_power.S | 4
-rw-r--r-- | arch/powerpc/kernel/cputable.c | 17
-rw-r--r-- | arch/powerpc/kernel/entry_32.S | 19
-rw-r--r-- | arch/powerpc/kernel/head_32.S | 3
-rw-r--r-- | arch/powerpc/kernel/head_8xx.S | 72
-rw-r--r-- | arch/powerpc/kernel/hw_breakpoint.c | 6
-rw-r--r-- | arch/powerpc/kernel/io-workarounds.c | 2
-rw-r--r-- | arch/powerpc/kernel/optprobes_head.S | 7
-rw-r--r-- | arch/powerpc/kernel/paca.c | 1
-rw-r--r-- | arch/powerpc/kernel/pci-common.c | 10
-rw-r--r-- | arch/powerpc/kernel/process.c | 25
-rw-r--r-- | arch/powerpc/kernel/prom_init.c | 2
-rw-r--r-- | arch/powerpc/kernel/setup_64.c | 12
-rw-r--r-- | arch/powerpc/kernel/smp.c | 5
-rw-r--r-- | arch/powerpc/kernel/stacktrace.c | 1
-rw-r--r-- | arch/powerpc/kernel/swsusp_64.c | 1
-rw-r--r-- | arch/powerpc/kernel/time.c | 5
-rw-r--r-- | arch/powerpc/kernel/traps.c | 1
19 files changed, 535 insertions, 430 deletions
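
Most of the asm-offsets.c hunk below is a mechanical conversion of open-coded DEFINE(sym, offsetof(struct ..., ...)) entries to the shorter OFFSET() helper, plus a new local STACK_PT_REGS_OFFSET() wrapper for pt_regs fields addressed relative to the stack frame. As a rough sketch of how the macros relate (DEFINE()/OFFSET() live in include/linux/kbuild.h; the exact definitions there may differ slightly from what is shown here):

    /* Sketch of the kbuild helpers (assumed form, see include/linux/kbuild.h): */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    /* Added by this patch in asm-offsets.c: */
    #define STACK_PT_REGS_OFFSET(sym, val) \
            DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val))

    /* So, for example, the new
     *      OFFSET(THREAD, task_struct, thread);
     * is equivalent to the old
     *      DEFINE(THREAD, offsetof(struct task_struct, thread));
     * and
     *      STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
     * replaces
     *      DEFINE(GPR0, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, gpr[0]));
     */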
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index f25239b3a06f..4367e7df51a1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -72,205 +72,190 @@ #include <asm/fixmap.h> #endif +#define STACK_PT_REGS_OFFSET(sym, val) \ + DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val)) + int main(void) { - DEFINE(THREAD, offsetof(struct task_struct, thread)); - DEFINE(MM, offsetof(struct task_struct, mm)); - DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); + OFFSET(THREAD, task_struct, thread); + OFFSET(MM, task_struct, mm); + OFFSET(MMCONTEXTID, mm_struct, context.id); #ifdef CONFIG_PPC64 DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); - DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); + OFFSET(TASKTHREADPPR, task_struct, thread.ppr); #else - DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); + OFFSET(THREAD_INFO, task_struct, stack); DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16)); - DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); + OFFSET(KSP_LIMIT, thread_struct, ksp_limit); #endif /* CONFIG_PPC64 */ #ifdef CONFIG_LIVEPATCH - DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); + OFFSET(TI_livepatch_sp, thread_info, livepatch_sp); #endif - DEFINE(KSP, offsetof(struct thread_struct, ksp)); - DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); + OFFSET(KSP, thread_struct, ksp); + OFFSET(PT_REGS, thread_struct, regs); #ifdef CONFIG_BOOKE - DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); + OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]); #endif - DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); - DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state)); - DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area)); - DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr)); - DEFINE(THREAD_LOAD_FP, offsetof(struct thread_struct, load_fp)); + OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode); + OFFSET(THREAD_FPSTATE, thread_struct, fp_state); + OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area); + OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr); + OFFSET(THREAD_LOAD_FP, thread_struct, load_fp); #ifdef CONFIG_ALTIVEC - DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state)); - DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area)); - DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); - DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); - DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr)); - DEFINE(THREAD_LOAD_VEC, offsetof(struct thread_struct, load_vec)); + OFFSET(THREAD_VRSTATE, thread_struct, vr_state); + OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area); + OFFSET(THREAD_VRSAVE, thread_struct, vrsave); + OFFSET(THREAD_USED_VR, thread_struct, used_vr); + OFFSET(VRSTATE_VSCR, thread_vr_state, vscr); + OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX - DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); + OFFSET(THREAD_USED_VSR, thread_struct, used_vsr); #endif /* CONFIG_VSX */ #ifdef CONFIG_PPC64 - DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); + OFFSET(KSP_VSID, thread_struct, ksp_vsid); #else /* CONFIG_PPC64 */ - DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); + OFFSET(PGDIR, thread_struct, pgdir); #ifdef CONFIG_SPE - DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); - 
DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); - DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); - DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); + OFFSET(THREAD_EVR0, thread_struct, evr[0]); + OFFSET(THREAD_ACC, thread_struct, acc); + OFFSET(THREAD_SPEFSCR, thread_struct, spefscr); + OFFSET(THREAD_USED_SPE, thread_struct, used_spe); #endif /* CONFIG_SPE */ #endif /* CONFIG_PPC64 */ #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0)); + OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0); #endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER - DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); + OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu); #endif #if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) - DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu)); + OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); - DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); - DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); - DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); - DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar)); - DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); - DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); - DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); - DEFINE(THREAD_CKVRSTATE, offsetof(struct thread_struct, - ckvr_state)); - DEFINE(THREAD_CKVRSAVE, offsetof(struct thread_struct, - ckvrsave)); - DEFINE(THREAD_CKFPSTATE, offsetof(struct thread_struct, - ckfp_state)); + OFFSET(PACATMSCRATCH, paca_struct, tm_scratch); + OFFSET(THREAD_TM_TFHAR, thread_struct, tm_tfhar); + OFFSET(THREAD_TM_TEXASR, thread_struct, tm_texasr); + OFFSET(THREAD_TM_TFIAR, thread_struct, tm_tfiar); + OFFSET(THREAD_TM_TAR, thread_struct, tm_tar); + OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr); + OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr); + OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs); + OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state); + OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave); + OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state); /* Local pt_regs on stack for Transactional Memory funcs. 
*/ DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags); + OFFSET(TI_PREEMPT, thread_info, preempt_count); + OFFSET(TI_TASK, thread_info, task); + OFFSET(TI_CPU, thread_info, cpu); #ifdef CONFIG_PPC64 - DEFINE(DCACHEL1BLOCKSIZE, offsetof(struct ppc64_caches, l1d.block_size)); - DEFINE(DCACHEL1LOGBLOCKSIZE, offsetof(struct ppc64_caches, l1d.log_block_size)); - DEFINE(DCACHEL1BLOCKSPERPAGE, offsetof(struct ppc64_caches, l1d.blocks_per_page)); - DEFINE(ICACHEL1BLOCKSIZE, offsetof(struct ppc64_caches, l1i.block_size)); - DEFINE(ICACHEL1LOGBLOCKSIZE, offsetof(struct ppc64_caches, l1i.log_block_size)); - DEFINE(ICACHEL1BLOCKSPERPAGE, offsetof(struct ppc64_caches, l1i.blocks_per_page)); + OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size); + OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size); + OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page); + OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size); + OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size); + OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page); /* paca */ DEFINE(PACA_SIZE, sizeof(struct paca_struct)); - DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); - DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); - DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); - DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); - DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); - DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); - DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); - DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); - DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase)); - DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); - DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); - DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened)); + OFFSET(PACAPACAINDEX, paca_struct, paca_index); + OFFSET(PACAPROCSTART, paca_struct, cpu_start); + OFFSET(PACAKSAVE, paca_struct, kstack); + OFFSET(PACACURRENT, paca_struct, __current); + OFFSET(PACASAVEDMSR, paca_struct, saved_msr); + OFFSET(PACASTABRR, paca_struct, stab_rr); + OFFSET(PACAR1, paca_struct, saved_r1); + OFFSET(PACATOC, paca_struct, kernel_toc); + OFFSET(PACAKBASE, paca_struct, kernelbase); + OFFSET(PACAKMSR, paca_struct, kernel_msr); + OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled); + OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); #ifdef CONFIG_PPC_BOOK3S - DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id)); + OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id); #ifdef CONFIG_PPC_MM_SLICES - DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, - mm_ctx_low_slices_psize)); - DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct, - mm_ctx_high_slices_psize)); + OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize); + OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize); DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); #endif /* CONFIG_PPC_MM_SLICES */ #endif #ifdef CONFIG_PPC_BOOK3E - DEFINE(PACAPGD, offsetof(struct 
paca_struct, pgd)); - DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd)); - DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); - DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb)); - DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); - DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit)); - DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg)); - DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); - DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); - DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); - DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr)); - - DEFINE(TCD_ESEL_NEXT, - offsetof(struct tlb_core_data, esel_next)); - DEFINE(TCD_ESEL_MAX, - offsetof(struct tlb_core_data, esel_max)); - DEFINE(TCD_ESEL_FIRST, - offsetof(struct tlb_core_data, esel_first)); + OFFSET(PACAPGD, paca_struct, pgd); + OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd); + OFFSET(PACA_EXGEN, paca_struct, exgen); + OFFSET(PACA_EXTLB, paca_struct, extlb); + OFFSET(PACA_EXMC, paca_struct, exmc); + OFFSET(PACA_EXCRIT, paca_struct, excrit); + OFFSET(PACA_EXDBG, paca_struct, exdbg); + OFFSET(PACA_MC_STACK, paca_struct, mc_kstack); + OFFSET(PACA_CRIT_STACK, paca_struct, crit_kstack); + OFFSET(PACA_DBG_STACK, paca_struct, dbg_kstack); + OFFSET(PACA_TCD_PTR, paca_struct, tcd_ptr); + + OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next); + OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max); + OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first); #endif /* CONFIG_PPC_BOOK3E */ #ifdef CONFIG_PPC_STD_MMU_64 - DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); - DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); - DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); + OFFSET(PACASLBCACHE, paca_struct, slb_cache); + OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr); + OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp); #ifdef CONFIG_PPC_MM_SLICES - DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp)); + OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp); #else - DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp)); + OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp); #endif /* CONFIG_PPC_MM_SLICES */ - DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); - DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); - DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); - DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); - DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); - DEFINE(SLBSHADOW_STACKVSID, - offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); - DEFINE(SLBSHADOW_STACKESID, - offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); - DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); - DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); - DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); - DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count)); - DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); + OFFSET(PACA_EXGEN, paca_struct, exgen); + OFFSET(PACA_EXMC, paca_struct, exmc); + OFFSET(PACA_EXSLB, paca_struct, exslb); + OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr); + OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr); + OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid); + OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid); + OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area); + OFFSET(LPPACA_PMCINUSE, 
lppaca, pmcregs_in_use); + OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx); + OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count); + OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx); #endif /* CONFIG_PPC_STD_MMU_64 */ - DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); + OFFSET(PACAEMERGSP, paca_struct, emergency_sp); #ifdef CONFIG_PPC_BOOK3S_64 - DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); - DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); -#endif - DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); - DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); - DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default)); - DEFINE(ACCOUNT_STARTTIME, - offsetof(struct paca_struct, accounting.starttime)); - DEFINE(ACCOUNT_STARTTIME_USER, - offsetof(struct paca_struct, accounting.starttime_user)); - DEFINE(ACCOUNT_USER_TIME, - offsetof(struct paca_struct, accounting.utime)); - DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct paca_struct, accounting.stime)); - DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); - DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); - DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso)); + OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp); + OFFSET(PACA_IN_MCE, paca_struct, in_mce); +#endif + OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); + OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); + OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default); + OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime); + OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user); + OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime); + OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime); + OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save); + OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost); + OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso); #else /* CONFIG_PPC64 */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - DEFINE(ACCOUNT_STARTTIME, - offsetof(struct thread_info, accounting.starttime)); - DEFINE(ACCOUNT_STARTTIME_USER, - offsetof(struct thread_info, accounting.starttime_user)); - DEFINE(ACCOUNT_USER_TIME, - offsetof(struct thread_info, accounting.utime)); - DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct thread_info, accounting.stime)); + OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime); + OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user); + OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime); + OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime); #endif #endif /* CONFIG_PPC64 */ /* RTAS */ - DEFINE(RTASBASE, offsetof(struct rtas_t, base)); - DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); + OFFSET(RTASBASE, rtas_t, base); + OFFSET(RTASENTRY, rtas_t, entry); /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); @@ -280,38 +265,38 @@ int main(void) DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); #endif /* CONFIG_PPC64 */ - DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); - DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); - DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); - DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); - DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); - DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); - DEFINE(GPR6, 
STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); - DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); - DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); - DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); - DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); - DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); - DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); - DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); + STACK_PT_REGS_OFFSET(GPR0, gpr[0]); + STACK_PT_REGS_OFFSET(GPR1, gpr[1]); + STACK_PT_REGS_OFFSET(GPR2, gpr[2]); + STACK_PT_REGS_OFFSET(GPR3, gpr[3]); + STACK_PT_REGS_OFFSET(GPR4, gpr[4]); + STACK_PT_REGS_OFFSET(GPR5, gpr[5]); + STACK_PT_REGS_OFFSET(GPR6, gpr[6]); + STACK_PT_REGS_OFFSET(GPR7, gpr[7]); + STACK_PT_REGS_OFFSET(GPR8, gpr[8]); + STACK_PT_REGS_OFFSET(GPR9, gpr[9]); + STACK_PT_REGS_OFFSET(GPR10, gpr[10]); + STACK_PT_REGS_OFFSET(GPR11, gpr[11]); + STACK_PT_REGS_OFFSET(GPR12, gpr[12]); + STACK_PT_REGS_OFFSET(GPR13, gpr[13]); #ifndef CONFIG_PPC64 - DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); + STACK_PT_REGS_OFFSET(GPR14, gpr[14]); #endif /* CONFIG_PPC64 */ /* * Note: these symbols include _ because they overlap with special * register names */ - DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); - DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); - DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); - DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); - DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); - DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); - DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); - DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); - DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); - DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); + STACK_PT_REGS_OFFSET(_NIP, nip); + STACK_PT_REGS_OFFSET(_MSR, msr); + STACK_PT_REGS_OFFSET(_CTR, ctr); + STACK_PT_REGS_OFFSET(_LINK, link); + STACK_PT_REGS_OFFSET(_CCR, ccr); + STACK_PT_REGS_OFFSET(_XER, xer); + STACK_PT_REGS_OFFSET(_DAR, dar); + STACK_PT_REGS_OFFSET(_DSISR, dsisr); + STACK_PT_REGS_OFFSET(ORIG_GPR3, orig_gpr3); + STACK_PT_REGS_OFFSET(RESULT, result); + STACK_PT_REGS_OFFSET(_TRAP, trap); #ifndef CONFIG_PPC64 /* * The PowerPC 400-class & Book-E processors have neither the DAR @@ -319,10 +304,10 @@ int main(void) * DEAR and ESR SPRs for such processors. For critical interrupts * we use them to hold SRR0 and SRR1. */ - DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); + STACK_PT_REGS_OFFSET(_DEAR, dar); + STACK_PT_REGS_OFFSET(_ESR, dsisr); #else /* CONFIG_PPC64 */ - DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); + STACK_PT_REGS_OFFSET(SOFTE, softe); /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); @@ -351,17 +336,17 @@ int main(void) #endif #ifndef CONFIG_PPC64 - DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); + OFFSET(MM_PGD, mm_struct, pgd); #endif /* ! 
CONFIG_PPC64 */ /* About the CPU features table */ - DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); - DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); - DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); + OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features); + OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup); + OFFSET(CPU_SPEC_RESTORE, cpu_spec, cpu_restore); - DEFINE(pbe_address, offsetof(struct pbe, address)); - DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); - DEFINE(pbe_next, offsetof(struct pbe, next)); + OFFSET(pbe_address, pbe, address); + OFFSET(pbe_orig_address, pbe, orig_address); + OFFSET(pbe_next, pbe, next); #ifndef CONFIG_PPC64 DEFINE(TASK_SIZE, TASK_SIZE); @@ -369,40 +354,40 @@ int main(void) #endif /* ! CONFIG_PPC64 */ /* datapage offsets for use by vdso */ - DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); - DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); - DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); - DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); - DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); - DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); - DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); - DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); - DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime)); - DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction)); - DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); - DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); - DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); - DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size)); + OFFSET(CFG_TB_ORIG_STAMP, vdso_data, tb_orig_stamp); + OFFSET(CFG_TB_TICKS_PER_SEC, vdso_data, tb_ticks_per_sec); + OFFSET(CFG_TB_TO_XS, vdso_data, tb_to_xs); + OFFSET(CFG_TB_UPDATE_COUNT, vdso_data, tb_update_count); + OFFSET(CFG_TZ_MINUTEWEST, vdso_data, tz_minuteswest); + OFFSET(CFG_TZ_DSTTIME, vdso_data, tz_dsttime); + OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32); + OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec); + OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec); + OFFSET(STAMP_XTIME, vdso_data, stamp_xtime); + OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction); + OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size); + OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size); + OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size); + OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size); #ifdef CONFIG_PPC64 - DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); - DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); - DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); - DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); - DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec)); - DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec)); - DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec)); + OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64); + OFFSET(TVAL64_TV_SEC, timeval, tv_sec); + OFFSET(TVAL64_TV_USEC, timeval, tv_usec); + 
OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec); + OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec); + OFFSET(TSPC64_TV_SEC, timespec, tv_sec); + OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec); + OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec); + OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec); #else - DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec)); - DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec)); + OFFSET(TVAL32_TV_SEC, timeval, tv_sec); + OFFSET(TVAL32_TV_USEC, timeval, tv_usec); + OFFSET(TSPC32_TV_SEC, timespec, tv_sec); + OFFSET(TSPC32_TV_NSEC, timespec, tv_nsec); #endif /* timeval/timezone offsets for use by vdso */ - DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); - DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); + OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest); + OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime); /* Other bits used by the vdso */ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); @@ -422,170 +407,170 @@ int main(void) DEFINE(PTE_SIZE, sizeof(pte_t)); #ifdef CONFIG_KVM - DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); - DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); - DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); - DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); - DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); - DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr)); + OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack); + OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid); + OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid); + OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr); + OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave); + OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr); #ifdef CONFIG_ALTIVEC - DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr)); + OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr); #endif - DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); - DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); - DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); + OFFSET(VCPU_XER, kvm_vcpu, arch.xer); + OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); + OFFSET(VCPU_LR, kvm_vcpu, arch.lr); #ifdef CONFIG_PPC_BOOK3S - DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); + OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); #endif - DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); + OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_PC, kvm_vcpu, arch.pc); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); - DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); - DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); - DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0)); - DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1)); - DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); - DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); + OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); + OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0); + OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1); + OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0); + OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1); + OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2); + OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3); #endif #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING - DEFINE(VCPU_TB_RMENTRY, offsetof(struct kvm_vcpu, arch.rm_entry)); - 
DEFINE(VCPU_TB_RMINTR, offsetof(struct kvm_vcpu, arch.rm_intr)); - DEFINE(VCPU_TB_RMEXIT, offsetof(struct kvm_vcpu, arch.rm_exit)); - DEFINE(VCPU_TB_GUEST, offsetof(struct kvm_vcpu, arch.guest_time)); - DEFINE(VCPU_TB_CEDE, offsetof(struct kvm_vcpu, arch.cede_time)); - DEFINE(VCPU_CUR_ACTIVITY, offsetof(struct kvm_vcpu, arch.cur_activity)); - DEFINE(VCPU_ACTIVITY_START, offsetof(struct kvm_vcpu, arch.cur_tb_start)); - DEFINE(TAS_SEQCOUNT, offsetof(struct kvmhv_tb_accumulator, seqcount)); - DEFINE(TAS_TOTAL, offsetof(struct kvmhv_tb_accumulator, tb_total)); - DEFINE(TAS_MIN, offsetof(struct kvmhv_tb_accumulator, tb_min)); - DEFINE(TAS_MAX, offsetof(struct kvmhv_tb_accumulator, tb_max)); -#endif - DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3)); - DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4)); - DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5)); - DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6)); - DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7)); - DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); - DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); - DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); - DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); - DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); + OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry); + OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr); + OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit); + OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time); + OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time); + OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity); + OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start); + OFFSET(TAS_SEQCOUNT, kvmhv_tb_accumulator, seqcount); + OFFSET(TAS_TOTAL, kvmhv_tb_accumulator, tb_total); + OFFSET(TAS_MIN, kvmhv_tb_accumulator, tb_min); + OFFSET(TAS_MAX, kvmhv_tb_accumulator, tb_max); +#endif + OFFSET(VCPU_SHARED_SPRG3, kvm_vcpu_arch_shared, sprg3); + OFFSET(VCPU_SHARED_SPRG4, kvm_vcpu_arch_shared, sprg4); + OFFSET(VCPU_SHARED_SPRG5, kvm_vcpu_arch_shared, sprg5); + OFFSET(VCPU_SHARED_SPRG6, kvm_vcpu_arch_shared, sprg6); + OFFSET(VCPU_SHARED_SPRG7, kvm_vcpu_arch_shared, sprg7); + OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid); + OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1); + OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared); + OFFSET(VCPU_SHARED_MSR, kvm_vcpu_arch_shared, msr); + OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr); #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) - DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian)); + OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian); #endif - DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); - DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1)); - DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2)); - DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3)); - DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); - DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); + OFFSET(VCPU_SHARED_MAS0, kvm_vcpu_arch_shared, mas0); + OFFSET(VCPU_SHARED_MAS1, kvm_vcpu_arch_shared, mas1); + OFFSET(VCPU_SHARED_MAS2, kvm_vcpu_arch_shared, mas2); + OFFSET(VCPU_SHARED_MAS7_3, kvm_vcpu_arch_shared, mas7_3); + OFFSET(VCPU_SHARED_MAS4, kvm_vcpu_arch_shared, mas4); + OFFSET(VCPU_SHARED_MAS6, 
kvm_vcpu_arch_shared, mas6); - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); + OFFSET(VCPU_KVM, kvm_vcpu, kvm); + OFFSET(KVM_LPID, kvm, arch.lpid); /* book3s */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets)); - DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); - DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); - DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); - DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); - DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits)); - DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls)); - DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); - DEFINE(KVM_RADIX, offsetof(struct kvm, arch.radix)); - DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); - DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); - DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); - DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); - DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst)); - DEFINE(VCPU_CPU, offsetof(struct kvm_vcpu, cpu)); - DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu)); + OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets); + OFFSET(KVM_SDR1, kvm, arch.sdr1); + OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid); + OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr); + OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1); + OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits); + OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls); + OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v); + OFFSET(KVM_RADIX, kvm, arch.radix); + OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr); + OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar); + OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); + OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty); + OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst); + OFFSET(VCPU_CPU, kvm_vcpu, cpu); + OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu); #endif #ifdef CONFIG_PPC_BOOK3S - DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); - DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); - DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic)); - DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); - DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); - DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); - DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr)); - DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); - DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); - DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx)); - DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr)); - DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx)); - DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr)); - DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); - DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); - DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); - DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); - DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); - DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); - DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); - DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); - DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc)); - DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar)); - DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar)); - DEFINE(VCPU_SIER, 
offsetof(struct kvm_vcpu, arch.sier)); - DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); - DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); - DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); - DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); - DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); - DEFINE(VCPU_FAULT_GPA, offsetof(struct kvm_vcpu, arch.fault_gpa)); - DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); - DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); - DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); - DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); - DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); - DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); - DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); - DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); - DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); - DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); - DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr)); - DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr)); - DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr)); - DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop)); - DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort)); - DEFINE(VCPU_TID, offsetof(struct kvm_vcpu, arch.tid)); - DEFINE(VCPU_PSSCR, offsetof(struct kvm_vcpu, arch.psscr)); - DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map)); - DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); - DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); - DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm)); - DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); - DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); - DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); - DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes)); - DEFINE(VCORE_VTB, offsetof(struct kvmppc_vcore, vtb)); - DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); - DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); + OFFSET(VCPU_PURR, kvm_vcpu, arch.purr); + OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr); + OFFSET(VCPU_IC, kvm_vcpu, arch.ic); + OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr); + OFFSET(VCPU_AMR, kvm_vcpu, arch.amr); + OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor); + OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr); + OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl); + OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr); + OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); + OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr); + OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx); + OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); + OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); + OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); + OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires); + OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions); + OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded); + OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded); + OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); + OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc); + OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc); + OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar); + OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar); + OFFSET(VCPU_SIER, kvm_vcpu, arch.sier); + OFFSET(VCPU_SLB, kvm_vcpu, arch.slb); + OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max); + OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr); + OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr); + OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar); + OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa); 
+ OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr); + OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); + OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap); + OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar); + OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr); + OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr); + OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb); + OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr); + OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr); + OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr); + OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr); + OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr); + OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr); + OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop); + OFFSET(VCPU_WORT, kvm_vcpu, arch.wort); + OFFSET(VCPU_TID, kvm_vcpu, arch.tid); + OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr); + OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map); + OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest); + OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); + OFFSET(VCORE_KVM, kvmppc_vcore, kvm); + OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); + OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); + OFFSET(VCORE_PCR, kvmppc_vcore, pcr); + OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes); + OFFSET(VCORE_VTB, kvmppc_vcore, vtb); + OFFSET(VCPU_SLB_E, kvmppc_slb, orige); + OFFSET(VCPU_SLB_V, kvmppc_slb, origv); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); - DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); - DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); - DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm)); - DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr)); - DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); - DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); - DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); - DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm)); - DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); - DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); - DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); - DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm)); - DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm)); - DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm)); + OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar); + OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar); + OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr); + OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm); + OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr); + OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr); + OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm); + OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm); + OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm); + OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm); + OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm); + OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm); + OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm); + OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm); + OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm); #endif #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE - DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu)); + OFFSET(PACA_SVCPU, paca_struct, shadow_vcpu); # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) #else # define SVCPU_FIELD(x, f) @@ -668,11 +653,11 @@ int main(void) HSTATE_FIELD(HSTATE_DECEXP, dec_expires); HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode); DEFINE(IPI_PRIORITY, IPI_PRIORITY); - DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr)); - DEFINE(KVM_SPLIT_PMMAR, offsetof(struct 
kvm_split_mode, pmmar)); - DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar)); - DEFINE(KVM_SPLIT_DO_NAP, offsetof(struct kvm_split_mode, do_nap)); - DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped)); + OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr); + OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar); + OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar); + OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap); + OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_PPC_BOOK3S_64 @@ -682,32 +667,27 @@ int main(void) #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ - DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); - DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); - DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); - DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); - DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9)); - DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); - DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); - DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); - DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save)); + OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_XER, kvm_vcpu, arch.xer); + OFFSET(VCPU_LR, kvm_vcpu, arch.lr); + OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); + OFFSET(VCPU_PC, kvm_vcpu, arch.pc); + OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9); + OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); + OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear); + OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr); + OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save); #endif /* CONFIG_PPC_BOOK3S */ #endif /* CONFIG_KVM */ #ifdef CONFIG_KVM_GUEST - DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, - scratch1)); - DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared, - scratch2)); - DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared, - scratch3)); - DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared, - int_pending)); - DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); - DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared, - critical)); - DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr)); + OFFSET(KVM_MAGIC_SCRATCH1, kvm_vcpu_arch_shared, scratch1); + OFFSET(KVM_MAGIC_SCRATCH2, kvm_vcpu_arch_shared, scratch2); + OFFSET(KVM_MAGIC_SCRATCH3, kvm_vcpu_arch_shared, scratch3); + OFFSET(KVM_MAGIC_INT, kvm_vcpu_arch_shared, int_pending); + OFFSET(KVM_MAGIC_MSR, kvm_vcpu_arch_shared, msr); + OFFSET(KVM_MAGIC_CRITICAL, kvm_vcpu_arch_shared, critical); + OFFSET(KVM_MAGIC_SR, kvm_vcpu_arch_shared, sr); #endif #ifdef CONFIG_44x @@ -716,45 +696,37 @@ int main(void) #endif #ifdef CONFIG_PPC_FSL_BOOK3E DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); - DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); - DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); - DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2)); - DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3)); - DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); + OFFSET(TLBCAM_MAS0, tlbcam, MAS0); + OFFSET(TLBCAM_MAS1, tlbcam, MAS1); + OFFSET(TLBCAM_MAS2, tlbcam, MAS2); + OFFSET(TLBCAM_MAS3, tlbcam, MAS3); + OFFSET(TLBCAM_MAS7, tlbcam, MAS7); #endif #if defined(CONFIG_KVM) && defined(CONFIG_SPE) - DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); - DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); - DEFINE(VCPU_SPEFSCR, offsetof(struct 
kvm_vcpu, arch.spefscr)); - DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); + OFFSET(VCPU_EVR, kvm_vcpu, arch.evr[0]); + OFFSET(VCPU_ACC, kvm_vcpu, arch.acc); + OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr); + OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr); #endif #ifdef CONFIG_KVM_BOOKE_HV - DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4)); - DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6)); + OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4); + OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6); #endif #ifdef CONFIG_KVM_EXIT_TIMING - DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, - arch.timing_exit.tv32.tbu)); - DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, - arch.timing_exit.tv32.tbl)); - DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, - arch.timing_last_enter.tv32.tbu)); - DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, - arch.timing_last_enter.tv32.tbl)); + OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu); + OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl); + OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu); + OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl); #endif #ifdef CONFIG_PPC_POWERNV - DEFINE(PACA_CORE_IDLE_STATE_PTR, - offsetof(struct paca_struct, core_idle_state_ptr)); - DEFINE(PACA_THREAD_IDLE_STATE, - offsetof(struct paca_struct, thread_idle_state)); - DEFINE(PACA_THREAD_MASK, - offsetof(struct paca_struct, thread_mask)); - DEFINE(PACA_SUBCORE_SIBLING_MASK, - offsetof(struct paca_struct, subcore_sibling_mask)); + OFFSET(PACA_CORE_IDLE_STATE_PTR, paca_struct, core_idle_state_ptr); + OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state); + OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask); + OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask); #endif DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 917188615bf5..7fe8c79e6937 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -101,6 +101,8 @@ _GLOBAL(__setup_cpu_power9) mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) or r3, r3, r4 + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) + andc r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 @@ -122,6 +124,8 @@ _GLOBAL(__restore_cpu_power9) mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) or r3, r3, r4 + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) + andc r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 6a82ef039c50..bb7a1890aeb7 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -386,6 +386,23 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, + { /* 3.00-compliant processor, i.e. 
Power9 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000005, + .cpu_name = "POWER9 (architected)", + .cpu_features = CPU_FTRS_POWER9, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .oprofile_type = PPC_OPROFILE_INVALID, + .oprofile_cpu_type = "ppc64/ibm-compat-v1", + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .flush_tlb = __flush_tlb_power9, + .platform = "power9", + }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3841d749a430..a38600949f3a 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -205,6 +205,9 @@ transfer_to_handler_cont: mflr r9 lwz r11,0(r9) /* virtual address of handler */ lwz r9,4(r9) /* where to go when done */ +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif #ifdef CONFIG_TRACE_IRQFLAGS lis r12,reenable_mmu@h ori r12,r12,reenable_mmu@l @@ -292,7 +295,9 @@ stack_ovf: lis r9,StackOverflow@ha addi r9,r9,StackOverflow@l LOAD_MSR_KERNEL(r10,MSR_KERNEL) - FIX_SRR1(r10,r12) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r9 mtspr SPRN_SRR1,r10 SYNC @@ -417,9 +422,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) mtlr r4 mtcr r5 lwz r7,_NIP(r1) - FIX_SRR1(r8, r0) lwz r2,GPR2(r1) lwz r1,GPR1(r1) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 SYNC @@ -699,6 +706,9 @@ fast_exception_return: lwz r10,_LINK(r11) mtlr r10 REST_GPR(10, r11) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR1,r9 mtspr SPRN_SRR0,r12 REST_GPR(9, r11) @@ -947,7 +957,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) .globl exc_exit_restart exc_exit_restart: lwz r12,_NIP(r1) - FIX_SRR1(r9,r10) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r9 REST_4GPRS(9, r1) @@ -1290,7 +1302,6 @@ _GLOBAL(enter_rtas) 1: tophys(r9,r1) lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ lwz r9,8(r9) /* original msr value */ - FIX_SRR1(r9,r0) addi r1,r1,INT_FRAME_SIZE li r0,0 mtspr SPRN_SPRG_RTAS,r0 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 9d963547d243..1607be7c0ef2 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -869,7 +869,6 @@ __secondary_start: /* enable MMU and jump to start_secondary */ li r4,MSR_KERNEL - FIX_SRR1(r4,r5) lis r3,start_secondary@h ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 @@ -977,7 +976,6 @@ start_here: ori r4,r4,2f@l tophys(r4,r4) li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) - FIX_SRR1(r3,r5) mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 SYNC @@ -1001,7 +999,6 @@ start_here: /* Now turn on the MMU for real! 
*/ li r4,MSR_KERNEL - FIX_SRR1(r4,r5) lis r3,start_kernel@h ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1a9c99d3e5d8..c032fe8c2d26 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -329,6 +329,12 @@ InstructionTLBMiss: mtspr SPRN_SPRG_SCRATCH2, r3 #endif EXCEPTION_PROLOG_0 +#ifdef CONFIG_PPC_8xx_PERF_EVENT + lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha + lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, 1 + stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) +#endif /* If we are faulting a kernel address, we have to use the * kernel page tables. @@ -429,6 +435,12 @@ InstructionTLBMiss: DataStoreTLBMiss: mtspr SPRN_SPRG_SCRATCH2, r3 EXCEPTION_PROLOG_0 +#ifdef CONFIG_PPC_8xx_PERF_EVENT + lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha + lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, 1 + stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) +#endif mfcr r3 /* If we are faulting a kernel address, we have to use the @@ -561,6 +573,7 @@ InstructionTLBError: andis. r10,r5,0x4000 beq+ 1f tlbie r4 +itlbie: /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ 1: EXC_XFER_LITE(0x400, handle_page_fault) @@ -585,6 +598,7 @@ DARFixed:/* Return from dcbx instruction bug workaround */ andis. r10,r5,0x4000 beq+ 1f tlbie r4 +dtlbie: 1: li r10,RPN_PATTERN mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ /* 0x300 is DataAccess exception, needed by bad_page_fault() */ @@ -602,8 +616,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */ * support of breakpoints and such. Someday I will get around to * using them. */ - EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE) + . = 0x1c00 +DataBreakpoint: + EXCEPTION_PROLOG_0 + mfcr r10 + mfspr r11, SPRN_SRR0 + cmplwi cr0, r11, (dtlbie - PAGE_OFFSET)@l + cmplwi cr7, r11, (itlbie - PAGE_OFFSET)@l + beq- cr0, 11f + beq- cr7, 11f + EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 + addi r3,r1,STACK_FRAME_OVERHEAD + mfspr r4,SPRN_BAR + stw r4,_DAR(r11) + mfspr r5,SPRN_DSISR + EXC_XFER_EE(0x1c00, do_break) +11: + mtcr r10 + EXCEPTION_EPILOG_0 + rfi + +#ifdef CONFIG_PPC_8xx_PERF_EVENT + . 
= 0x1d00 +InstructionBreakpoint: + EXCEPTION_PROLOG_0 + lis r10, (instruction_counter - PAGE_OFFSET)@ha + lwz r11, (instruction_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, -1 + stw r11, (instruction_counter - PAGE_OFFSET)@l(r10) + lis r10, 0xffff + ori r10, r10, 0x01 + mtspr SPRN_COUNTA, r10 + EXCEPTION_EPILOG_0 + rfi +#else EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) +#endif EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) @@ -977,6 +1026,14 @@ initial_mmu: lis r8, IDC_ENABLE@h mtspr SPRN_DC_CST, r8 #endif + /* Disable debug mode entry on breakpoints */ + mfspr r8, SPRN_DER +#ifdef CONFIG_PPC_8xx_PERF_EVENT + rlwinm r8, r8, 0, ~0xc +#else + rlwinm r8, r8, 0, ~0x8 +#endif + mtspr SPRN_DER, r8 blr @@ -1010,3 +1067,16 @@ cpu6_errata_word: .space 16 #endif +#ifdef CONFIG_PPC_8xx_PERF_EVENT + .globl itlb_miss_counter +itlb_miss_counter: + .space 4 + + .globl dtlb_miss_counter +dtlb_miss_counter: + .space 4 + + .globl instruction_counter +instruction_counter: + .space 4 +#endif diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 53cc9270aac8..53b9c1dfd7d9 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -211,9 +211,11 @@ int hw_breakpoint_handler(struct die_args *args) int rc = NOTIFY_STOP; struct perf_event *bp; struct pt_regs *regs = args->regs; +#ifndef CONFIG_PPC_8xx int stepped = 1; - struct arch_hw_breakpoint *info; unsigned int instr; +#endif + struct arch_hw_breakpoint *info; unsigned long dar = regs->dar; /* Disable breakpoints during exception handling */ @@ -257,6 +259,7 @@ int hw_breakpoint_handler(struct die_args *args) (dar - bp->attr.bp_addr < bp->attr.bp_len))) info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; +#ifndef CONFIG_PPC_8xx /* Do not emulate user-space instructions, instead single-step them */ if (user_mode(regs)) { current->thread.last_hit_ubp = bp; @@ -280,6 +283,7 @@ int hw_breakpoint_handler(struct die_args *args) perf_event_disable_inatomic(bp); goto out; } +#endif /* * As a policy, the callback is invoked in a 'trigger-after-execute' * fashion diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 5f8613ceb97f..a582e0d42525 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -12,7 +12,7 @@ #undef DEBUG #include <linux/kernel.h> -#include <linux/sched.h> /* for init_mm */ +#include <linux/sched/mm.h> /* for init_mm */ #include <asm/io.h> #include <asm/machdep.h> diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index 53e429b5a29d..4937bef7652f 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -65,6 +65,13 @@ optprobe_template_entry: mfdsisr r5 std r5,_DSISR(r1) + /* + * We may get here from a module, so load the kernel TOC in r2. + * The original TOC gets restored when pt_regs is restored + * further below. 
+ */ + ld r2,PACATOC(r13) + .global optprobe_template_op_address optprobe_template_op_address: /* diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index fa20060ff7a5..dfc479df9634 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -10,6 +10,7 @@ #include <linux/smp.h> #include <linux/export.h> #include <linux/memblock.h> +#include <linux/sched/task.h> #include <asm/lppaca.h> #include <asm/paca.h> diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 8e6fde8d28f3..ffda24a38dda 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1560,16 +1560,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, /* Hookup PHB Memory resources */ for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; - if (!res->flags) { - if (i == 0) - printk(KERN_ERR "PCI: Memory resource 0 not set for " - "host bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + if (!res->flags) continue; - } - offset = hose->mem_offset[i]; - + offset = hose->mem_offset[i]; pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i, res, (unsigned long long)offset); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 5dd056df0baa..d645da302bf2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -16,6 +16,9 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> @@ -730,6 +733,28 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) mtspr(SPRN_DABRX, dabrx); return 0; } +#elif defined(CONFIG_PPC_8xx) +static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) +{ + unsigned long addr = dabr & ~HW_BRK_TYPE_DABR; + unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */ + unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */ + + if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ) + lctrl1 |= 0xa0000; + else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE) + lctrl1 |= 0xf0000; + else if ((dabr & HW_BRK_TYPE_RDWR) == 0) + lctrl2 = 0; + + mtspr(SPRN_LCTRL2, 0); + mtspr(SPRN_CMPE, addr); + mtspr(SPRN_CMPF, addr + 4); + mtspr(SPRN_LCTRL1, lctrl1); + mtspr(SPRN_LCTRL2, lctrl2); + + return 0; +} #else static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) { diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 616de028f7f8..a3944540fe0d 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -839,7 +839,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { 0, #endif .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), - .bin_opts = OV5_FEAT(OV5_RESIZE_HPT), + .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), .micro_checkpoint = 0, .reserved0 = 0, .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index b9855f1b290a..adf2084f214b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -113,14 +113,12 @@ void __init setup_tlb_core_data(void) * If we have threads, we need either tlbsrx. * or e6500 tablewalk mode, or else TLB handlers * will be racy and could produce duplicate entries. + * Should we panic instead? 
*/ - if (smt_enabled_at_boot >= 2 && - !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && - book3e_htw_mode != PPC_HTW_E6500) { - /* Should we panic instead? */ - WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n", - __func__); - } + WARN_ONCE(smt_enabled_at_boot >= 2 && + !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && + book3e_htw_mode != PPC_HTW_E6500, + "%s: unsupported MMU configuration\n", __func__); } } #endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 573fb3a461b5..46f89e66a273 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -19,7 +19,8 @@ #include <linux/kernel.h> #include <linux/export.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/topology.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -795,7 +796,7 @@ void __init smp_cpus_done(unsigned int max_cpus) * se we pin us down to CPU 0 for a short while */ alloc_cpumask_var(&old_mask, GFP_NOWAIT); - cpumask_copy(old_mask, tsk_cpus_allowed(current)); + cpumask_copy(old_mask, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); if (smp_ops && smp_ops->setup_cpu) diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 4f24606afc3f..66711958493c 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -12,6 +12,7 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <asm/ptrace.h> #include <asm/processor.h> diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c index 0e899e47c325..51db012808f5 100644 --- a/arch/powerpc/kernel/swsusp_64.c +++ b/arch/powerpc/kernel/swsusp_64.c @@ -10,6 +10,7 @@ #include <linux/irq.h> #include <linux/sched.h> #include <linux/interrupt.h> +#include <linux/nmi.h> void do_after_copyback(void) { diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 14e485525e31..07b90725855e 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -34,6 +34,7 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> @@ -57,7 +58,7 @@ #include <linux/clk-provider.h> #include <linux/suspend.h> #include <linux/rtc.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <asm/trace.h> #include <asm/io.h> @@ -709,7 +710,7 @@ unsigned long long running_clock(void) * time and on a host which doesn't do any virtualisation TB *should* equal * VTB so it makes no difference anyway. */ - return local_clock() - cputime_to_nsecs(kcpustat_this_cpu->cpustat[CPUTIME_STEAL]); + return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL]; } #endif diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index e6cc56b61d01..ff365f9de27a 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> |
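
For context on how the constants defined above are consumed (a sketch of the standard Kbuild asm-offsets mechanism, not something introduced by this patch): asm-offsets.c is compiled to assembly only, and the "->SYM value" markers emitted by DEFINE()/OFFSET() are extracted into a generated header that assembler sources include, so each symbol becomes a plain numeric offset. Roughly, and with an illustrative value only:

    /* include/generated/asm-offsets.h -- generated; the value below is illustrative, not real */
    #define PACATOC 0x10    /* offsetof(struct paca_struct, kernel_toc) */

    /* consumed from assembly, as in the optprobes_head.S hunk above: */
    ld      r2,PACATOC(r13)    /* r13 points at the PACA; load the kernel TOC pointer */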