author     Christophe Leroy <[email protected]>    2022-09-19 19:01:33 +0200
committer  Michael Ellerman <[email protected]>    2022-09-26 23:00:13 +1000
commit     e0d68273d7069537701bb91c51d90d1e12aacc33 (patch)
tree       437bc2b4a05d22f7b02dadb0c7ba34792d9c9b20 /arch/powerpc/kernel
parent     d7216567c65cbed655f9bf87ef906f9246d6f698 (diff)
powerpc: Remove CONFIG_PPC_BOOK3E
CONFIG_PPC_BOOK3E is redundant with CONFIG_PPC_BOOK3E_64. The latter is more explicit about the fact that it is a 64-bit target.

Remove CONFIG_PPC_BOOK3E.

Signed-off-by: Christophe Leroy <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/5d0891490813c19cdcfc04678f512ea68cba3e64.1663606876.git.christophe.leroy@csgroup.eu
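The redundancy is a Kconfig-level one: CONFIG_PPC_BOOK3E could only ever be enabled when CONFIG_PPC_BOOK3E_64 was, so every guard in the diff below can switch to the more specific symbol without changing which code is built. As a minimal sketch, the pre-removal relationship looked roughly like the following (reconstructed for illustration; the corresponding Kconfig change is not part of the arch/powerpc/kernel diffstat shown on this page):

# Illustrative sketch only, not a hunk from this diff:
# PPC_BOOK3E was a derived symbol, set exactly when PPC_BOOK3E_64 was.
config PPC_BOOK3E
	def_bool y
	depends on PPC_BOOK3E_64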
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c    6
-rw-r--r--  arch/powerpc/kernel/entry_64.S       6
-rw-r--r--  arch/powerpc/kernel/head_64.S       40
-rw-r--r--  arch/powerpc/kernel/misc_64.S        6
-rw-r--r--  arch/powerpc/kernel/paca.c           6
-rw-r--r--  arch/powerpc/kernel/setup.h          2
-rw-r--r--  arch/powerpc/kernel/setup_64.c       8
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S    2
8 files changed, 38 insertions, 38 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8c10f536e478..10ce03052a19 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -197,7 +197,7 @@ int main(void)
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
OFFSET(PACAPGD, paca_struct, pgd);
OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd);
OFFSET(PACA_EXGEN, paca_struct, exgen);
@@ -213,7 +213,7 @@ int main(void)
OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next);
OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max);
OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first);
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACA_EXGEN, paca_struct, exgen);
@@ -248,7 +248,7 @@ int main(void)
#ifdef CONFIG_PPC64
OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 01ace4c56104..3e2e37e6ecab 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -292,16 +292,16 @@ _GLOBAL(enter_prom)
/* Prepare a 32-bit mode big endian MSR
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
rlwinm r11,r11,0,1,31
mtsrr1 r11
rfi
-#else /* CONFIG_PPC_BOOK3E */
+#else /* CONFIG_PPC_BOOK3E_64 */
LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
andc r11,r11,r12
mtsrr1 r11
RFI_TO_KERNEL
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
1: /* Return from OF */
FIXUP_ENDIAN
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index cf2c08902c05..da544ea0ce01 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -143,7 +143,7 @@ DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
.globl __secondary_hold
__secondary_hold:
FIXUP_ENDIAN
-#ifndef CONFIG_PPC_BOOK3E
+#ifndef CONFIG_PPC_BOOK3E_64
mfmsr r24
ori r24,r24,MSR_RI
mtmsrd r24 /* RI on */
@@ -160,7 +160,7 @@ __secondary_hold:
sync
li r26,0
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
tovirt(r26,r26)
#endif
/* All secondary cpus wait here until told to start. */
@@ -169,7 +169,7 @@ __secondary_hold:
beq 100b
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
tovirt(r12,r12)
#endif
mtctr r12
@@ -178,7 +178,7 @@ __secondary_hold:
* it may be the case that other platforms have r4 right to
* begin with, this gives us some safety in case it is not
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
mr r4,r25
#else
li r4,0
@@ -214,7 +214,7 @@ USE_TEXT_SECTION()
#include "interrupt_64.S"
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
* hotplug case. It is set by cpu hotplug code, and is invalid by default.
@@ -322,7 +322,7 @@ _GLOBAL(fsl_secondary_thread_init)
bl book3e_secondary_thread_init
b generic_secondary_common_init
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
/*
* On pSeries and most other platforms, secondary processors spin
@@ -345,7 +345,7 @@ _GLOBAL(generic_secondary_smp_init)
bl relative_toc
tovirt(r2,r2)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* Book3E initialization */
mr r3,r24
mr r4,r25
@@ -417,7 +417,7 @@ generic_secondary_common_init:
b kexec_wait /* next kernel might do better */
2: SET_PACA(r13)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
mtspr SPRN_SPRG_TLB_EXFRAME,r12
#endif
@@ -519,7 +519,7 @@ __start_initialization_multiplatform:
mr r29,r9
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
bl start_initialization_book3e
b __after_prom_start
#else
@@ -540,7 +540,7 @@ __start_initialization_multiplatform:
/* Switch off MMU if not already off */
bl __mmu_off
b __after_prom_start
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
__REF
__boot_from_prom:
@@ -587,11 +587,11 @@ __after_prom_start:
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldi r25,r25,32
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
#endif
lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tophys(r26,r26)
#endif
cmplwi cr0,r7,1 /* flagged to stay where we are ? */
@@ -599,7 +599,7 @@ __after_prom_start:
add r25,r25,r26
1: mr r3,r25
bl relocate
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
/* IVPR needs to be set after relocation. */
bl init_core_book3e
#endif
@@ -613,11 +613,11 @@ __after_prom_start:
* Note: This process overwrites the OF exception vectors.
*/
li r3,0 /* target addr */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
#endif
mr. r4,r26 /* In some cases the loader may */
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tovirt(r4,r4)
#endif
beq 9f /* have already put us at zero */
@@ -630,14 +630,14 @@ __after_prom_start:
* variable __run_at_load, if it is set the kernel is treated as relocatable
* kernel, otherwise it will be moved to PHYSICAL_START
*/
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
#endif
lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
cmplwi cr0,r7,1
bne 3f
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
LOAD_REG_ADDR(r5, __end_interrupts)
LOAD_REG_ADDR(r11, _stext)
sub r5,r5,r11
@@ -871,10 +871,10 @@ _GLOBAL(start_secondary_resume)
*/
enable_64b_mode:
mfmsr r11 /* grab the current MSR */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
mtmsr r11
-#else /* CONFIG_PPC_BOOK3E */
+#else /* CONFIG_PPC_BOOK3E_64 */
LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
or r11,r11,r12
mtmsrd r11
@@ -940,7 +940,7 @@ start_here_multiplatform:
std r29,8(r11);
#endif
-#ifndef CONFIG_PPC_BOOK3E
+#ifndef CONFIG_PPC_BOOK3E_64
mfmsr r6
ori r6,r6,MSR_RI
mtmsrd r6 /* RI on */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index fd6d8d3a548e..36184cada00b 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -286,7 +286,7 @@ kexec_flag:
#ifdef CONFIG_KEXEC_CORE
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/*
* BOOK3E has no real MMU mode, so we have to setup the initial TLB
* for a core to identity map v:0 to p:0. This current implementation
@@ -354,7 +354,7 @@ _GLOBAL(kexec_smp_wait)
* don't overwrite r3 here, it is live for kexec_wait above.
*/
real_mode: /* assume normal blr return */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* Create an identity mapping. */
b kexec_create_tlb
#else
@@ -413,7 +413,7 @@ _GLOBAL(kexec_sequence)
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
/* disable interrupts, we are overwriting kernel data next */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
wrteei 0
#else
mfmsr r3
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index dfd097b79160..be8db402e963 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -186,7 +186,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
#ifdef CONFIG_PPC_PSERIES
new_paca->lppaca_ptr = NULL;
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
new_paca->kernel_pgd = swapper_pg_dir;
#endif
new_paca->lock_token = 0x8000;
@@ -203,7 +203,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->slb_shadow_ptr = NULL;
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* For now -- if we have threads this will be adjusted later */
new_paca->tcd_ptr = &new_paca->tcd;
#endif
@@ -215,7 +215,7 @@ void setup_paca(struct paca_struct *new_paca)
/* Setup r13 */
local_paca = new_paca;
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* On Book3E, initialize the TLB miss exception frames */
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 93f22da12abe..7912bb50a7cb 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -23,7 +23,7 @@ void check_smt_enabled(void);
static inline void check_smt_enabled(void) { }
#endif
-#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
void setup_tlb_core_data(void);
#else
static inline void setup_tlb_core_data(void) { }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2b2d0b0fbb30..6434a3f6acb5 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -86,7 +86,7 @@ struct ppc64_caches ppc64_caches = {
};
EXPORT_SYMBOL_GPL(ppc64_caches);
-#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
int cpu;
@@ -673,7 +673,7 @@ void __init initialize_cache_info(void)
*/
__init u64 ppc64_bolted_size(void)
{
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* Freescale BookE bolts the entire linear mapping */
/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
@@ -723,7 +723,7 @@ void __init irqstack_early_init(void)
}
}
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
void __init exc_lvl_early_init(void)
{
unsigned int i;
@@ -825,7 +825,7 @@ void __init setup_per_cpu_areas(void)
/*
* BookE and BookS radix are historical values and should be revisited.
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
atom_size = SZ_1M;
} else if (radix_enabled()) {
atom_size = PAGE_SIZE;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index e68eb9381066..b60d81acccfc 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -71,7 +71,7 @@ SECTIONS
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
KEEP(*(.head.text.first_256B));
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#else
KEEP(*(.head.text.real_vectors));
*(.head.text.real_trampolines);