Diffstat (limited to 'arch/powerpc')
28 files changed, 151 insertions, 93 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 3c0ba22dc360..db0a1c281587 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -75,7 +75,7 @@
 .macro kuap_check	current, gpr
 #ifdef CONFIG_PPC_KUAP_DEBUG
-	lwz	\gpr2, KUAP(thread)
+	lwz	\gpr, KUAP(thread)
 999:	twnei	\gpr, 0
 	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 2781ebf6add4..6fc4520092c7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -251,7 +251,8 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
 extern void hash__vmemmap_remove_mapping(unsigned long start,
 					 unsigned long page_size);
 
-int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
+int hash__create_section_mapping(unsigned long start, unsigned long end,
+				 int nid, pgprot_t prot);
 int hash__remove_section_mapping(unsigned long start, unsigned long end);
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index a1c60d5b50af..08c222d5b764 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -294,7 +294,8 @@ static inline unsigned long radix__get_tree_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int radix__create_section_mapping(unsigned long start, unsigned long end, int nid);
+int radix__create_section_mapping(unsigned long start, unsigned long end,
+				  int nid, pgprot_t prot);
 int radix__remove_section_mapping(unsigned long start, unsigned long end);
 #endif /* CONFIG_MEMORY_HOTPLUG */
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e0e71777961f..3a0db7b0b46e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -250,9 +250,27 @@ static inline bool arch_irqs_disabled(void)
 	}							\
 } while(0)
 
+static inline bool __lazy_irq_pending(u8 irq_happened)
+{
+	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
+ */
 static inline bool lazy_irq_pending(void)
 {
-	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+	return __lazy_irq_pending(get_paca()->irq_happened);
+}
+
+/*
+ * Check if a lazy IRQ is pending, with no debugging checks.
+ * Should be called with IRQs hard disabled.
+ * For use in RI disabled code or other constrained situations.
+ */
+static inline bool lazy_irq_pending_nocheck(void)
+{
+	return __lazy_irq_pending(local_paca->irq_happened);
 }
 
 /*
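For context on the new helper: get_paca() can expand to a debug_smp_processor_id() check under CONFIG_DEBUG_PREEMPT, which must not run in a window where MSR[RI] is cleared, so the _nocheck variant reads local_paca directly. A minimal sketch of the exit-path pattern it enables (names taken from the syscall_64.c hunks further down; not the literal kernel code):

        /*
         * Sketch: with EE and RI both hard-disabled, no tracing or debug
         * helpers may run, so use the _nocheck variant to test for an
         * interrupt that arrived while soft-masked.
         */
        __hard_EE_RI_disable();
        if (unlikely(lazy_irq_pending_nocheck())) {
                /* an interrupt became pending: back out and replay it */
                __hard_RI_enable();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
        }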
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 356658711a86..5398bfc465b4 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -3,28 +3,10 @@
 #define _ASM_POWERPC_MODULE_H
 #ifdef __KERNEL__
 
-/*
- */
-
 #include <linux/list.h>
 #include <asm/bug.h>
 #include <asm-generic/module.h>
 
-
-#ifdef CONFIG_MPROFILE_KERNEL
-#define MODULE_ARCH_VERMAGIC_FTRACE	"mprofile-kernel "
-#else
-#define MODULE_ARCH_VERMAGIC_FTRACE	""
-#endif
-
-#ifdef CONFIG_RELOCATABLE
-#define MODULE_ARCH_VERMAGIC_RELOCATABLE	"relocatable "
-#else
-#define MODULE_ARCH_VERMAGIC_RELOCATABLE	""
-#endif
-
-#define MODULE_ARCH_VERMAGIC MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
-
 #ifndef __powerpc64__
 /*
  * Thanks to Paul M for explaining this.
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 080a0bf8e54b..3ee8df0f66e0 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -240,13 +240,8 @@ static inline bool pfn_valid(unsigned long pfn)
  * and needs to be executable. This means the whole heap ends
  * up being executable.
  */
-#define VM_DATA_DEFAULT_FLAGS32 \
-	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-				 VM_READ | VM_WRITE | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
+#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC
 
 #ifdef __powerpc64__
 #include <asm/page_64.h>
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 5962797f784a..79a9b7c6a132 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -94,11 +94,8 @@ extern u64 ppc64_pft_size;
  * stack by default, so in the absence of a PT_GNU_STACK program header
  * we turn execute permission off.
  */
-#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
-					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
-					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_STACK_DEFAULT_FLAGS32	VM_DATA_FLAGS_EXEC
+#define VM_STACK_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC
 
 #define VM_STACK_DEFAULT_FLAGS \
 	(is_32bit_task() ? \
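The VM_DATA_FLAGS_* helpers used above come from generic include/linux/mm.h, added alongside this series; for reference, they look roughly like this (paraphrased from the generic header, not part of this diff):

        #define VM_ACCESS_FLAGS		(VM_READ | VM_WRITE | VM_EXEC)

        /* TASK_EXEC is VM_EXEC when the task's personality sets READ_IMPLIES_EXEC */
        #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
        				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
        #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
        				 VM_MAYWRITE | VM_MAYEXEC)
        #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
        				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

so the powerpc definitions keep their old meaning, including the READ_IMPLIES_EXEC personality handling for 32-bit data segments.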
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 3192d454a733..c89b32443cff 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -13,7 +13,8 @@
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-extern int create_section_mapping(unsigned long start, unsigned long end, int nid);
+extern int create_section_mapping(unsigned long start, unsigned long end,
+				  int nid, pgprot_t prot);
 extern int remove_section_mapping(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 2f500debae21..0969285996cb 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -166,13 +166,17 @@ do {						\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
 	if (!is_kernel_addr((unsigned long)__pu_addr))		\
 		might_fault();					\
-	__chk_user_ptr(ptr);					\
+	__chk_user_ptr(__pu_addr);				\
 	if (do_allow)						\
-		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
 	else							\
-		__put_user_size_allowed((x), __pu_addr, (size), __pu_err);	\
+		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })
 
@@ -180,9 +184,13 @@ do {						\
 ({								\
 	long __pu_err = -EFAULT;				\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
 	might_fault();						\
-	if (access_ok(__pu_addr, size))				\
-		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	if (access_ok(__pu_addr, __pu_size))			\
+		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })
 
@@ -190,8 +198,12 @@ do {						\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
-	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
+	__chk_user_ptr(__pu_addr);				\
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })
 
@@ -283,15 +295,18 @@ do {						\
 	long __gu_err;						\
 	__long_type(*(ptr)) __gu_val;				\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
 	barrier_nospec();					\
 	if (do_allow)						\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	else							\
-		__get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
+								\
 	__gu_err;						\
 })
 
@@ -300,12 +315,15 @@ do {						\
 	long __gu_err = -EFAULT;				\
 	__long_type(*(ptr)) __gu_val = 0;			\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
+	__typeof__(size) __gu_size = (size);			\
+								\
 	might_fault();						\
-	if (access_ok(__gu_addr, (size))) {			\
+	if (access_ok(__gu_addr, __gu_size)) {			\
 		barrier_nospec();				\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	}							\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
 	__gu_err;						\
 })
 
@@ -314,10 +332,13 @@ do {						\
 	long __gu_err;						\
 	__long_type(*(ptr)) __gu_val;				\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
 	barrier_nospec();					\
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
 	__gu_err;						\
 })
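The uaccess.h rework above is about macro hygiene: each of ptr, x and size is now captured exactly once into a local temporary (__pu_addr/__pu_val/__pu_size, __gu_addr/__gu_size), before the user-access window is opened, so arguments with side effects behave as they would with a real function call. An illustrative (non-kernel) example of what a multiple-expansion macro could get wrong:

        /* Hypothetical caller: the increments are part of the arguments. */
        u32 *src = buf;
        u32 __user *dst = ubuf;
        long err = 0;
        int i;

        for (i = 0; i < n; i++)
                err |= __put_user(*src++, dst++);	/* (x) and (ptr) must be
                					   evaluated exactly once */

If an expansion mentioned (x) or (ptr) twice, the pointers would advance twice per iteration; the temporaries make the expansion single-evaluation by construction, and also guarantee the evaluation happens before user access is allowed.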
diff --git a/arch/powerpc/include/asm/vermagic.h b/arch/powerpc/include/asm/vermagic.h
new file mode 100644
index 000000000000..b054a8576e5d
--- /dev/null
+++ b/arch/powerpc/include/asm/vermagic.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#ifdef CONFIG_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC_FTRACE	"mprofile-kernel "
+#else
+#define MODULE_ARCH_VERMAGIC_FTRACE	""
+#endif
+
+#ifdef CONFIG_RELOCATABLE
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE	"relocatable "
+#else
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE	""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+	MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
+
+#endif /* _ASM_VERMAGIC_H */
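The definitions move verbatim from asm/module.h into the new asm/vermagic.h, which the generic include/linux/vermagic.h pulls in when building the version-magic string each module is stamped with; sketched from memory (abridged, not part of this diff):

        #define VERMAGIC_STRING						\
        	UTS_RELEASE " "						\
        	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT		\
        	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
        	MODULE_ARCH_VERMAGIC

On a relocatable kernel built with -mprofile-kernel the powerpc contribution is still "mprofile-kernel relocatable "; only the header carrying it changes, so asm/module.h no longer has to be included just for vermagic.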
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index a6371fb8f761..8420abd4ea1c 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -732,7 +732,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
 
-	kuap_check r2, r4
+	kuap_check r2, r0
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9a1e5d636dea..b3c9f15089b6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -472,15 +472,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 #ifdef CONFIG_PPC_BOOK3S
 /*
  * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
- * touched, AMR not set, no exit work created, then this can be used.
+ * touched, no exit work created, then this can be used.
  */
 	.balign IFETCH_ALIGN_BYTES
 	.globl fast_interrupt_return
 fast_interrupt_return:
 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
+	kuap_check_amr r3, r4
 	ld	r4,_MSR(r1)
 	andi.	r0,r4,MSR_PR
 	bne	.Lfast_user_interrupt_return
+	kuap_restore_amr r3
 	andi.	r0,r4,MSR_RI
 	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
 	bne+	.Lfast_kernel_interrupt_return
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 728ccb0f560c..b0ad930cbae5 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -971,6 +971,7 @@ EXC_COMMON_BEGIN(system_reset_common)
 	ld	r10,SOFTE(r1)
 	stb	r10,PACAIRQSOFTMASK(r13)
 
+	kuap_restore_amr r10
 	EXCEPTION_RESTORE_REGS
 	RFI_TO_USER_OR_KERNEL
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9bb663977e84..2cec543c38f0 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -344,8 +344,9 @@ _ENTRY(saved_ksp_limit)
 /* 0x0C00 - System Call Exception */
 	START_EXCEPTION(0x0C00,	SystemCall)
 	SYSCALL_ENTRY	0xc00
+/*	Trap_0D is commented out to get more space for system call exception */
 
-	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
+/*	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */
 	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
 	EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
diff --git a/arch/powerpc/kernel/ima_arch.c b/arch/powerpc/kernel/ima_arch.c
index e34116255ced..957abd592075 100644
--- a/arch/powerpc/kernel/ima_arch.c
+++ b/arch/powerpc/kernel/ima_arch.c
@@ -19,12 +19,12 @@ bool arch_ima_get_secureboot(void)
  * to be stored as an xattr or as an appended signature.
  *
  * To avoid duplicate signature verification as much as possible, the IMA
- * policy rule for module appraisal is added only if CONFIG_MODULE_SIG_FORCE
+ * policy rule for module appraisal is added only if CONFIG_MODULE_SIG
  * is not enabled.
  */
 static const char *const secure_rules[] = {
 	"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
 	"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
 	NULL
@@ -50,7 +50,7 @@ static const char *const secure_and_trusted_rules[] = {
 	"measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
 	"measure func=MODULE_CHECK template=ima-modsig",
 	"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
 	"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
 	NULL
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 438a9befce41..8105010b0e76 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -534,6 +534,8 @@ static bool __init parse_cache_info(struct device_node *np,
 	lsizep = of_get_property(np, propnames[3], NULL);
 	if (bsizep == NULL)
 		bsizep = lsizep;
+	if (lsizep == NULL)
+		lsizep = bsizep;
 	if (lsizep != NULL)
 		lsize = be32_to_cpu(*lsizep);
 	if (bsizep != NULL)
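parse_cache_info() fills cache geometry from device-tree properties; previously only a missing block size fell back to the line size, so a DT providing just the block-size property left the line size at its default. The fallback is now symmetric. Reduced to its core (the literal property names are illustrative of the propnames[] table):

        /* sketch of the now-symmetric fallback */
        bsizep = of_get_property(np, "d-cache-block-size", NULL);
        lsizep = of_get_property(np, "d-cache-line-size", NULL);
        if (bsizep == NULL)
                bsizep = lsizep;	/* no block size: use line size */
        if (lsizep == NULL)
                lsizep = bsizep;	/* no line size: use block size (new) */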
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index c74295a7765b..7b7c89cad901 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -35,6 +35,8 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
 
+	kuap_check_amr();
+
 	account_cpu_user_entry();
 
 #ifdef CONFIG_PPC_SPLPAR
@@ -47,8 +49,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	}
 #endif
 
-	kuap_check_amr();
-
 	/*
 	 * This is not required for the syscall exit path, but makes the
 	 * stack frame look nicer. If this was initialised in the first stack
@@ -117,6 +117,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	unsigned long ti_flags;
 	unsigned long ret = 0;
 
+	kuap_check_amr();
+
 	regs->result = r3;
 
 	/* Check whether the syscall is issued inside a restartable sequence */
@@ -189,7 +191,7 @@ again:
 
 	/* This pattern matches prep_irq_for_idle */
 	__hard_EE_RI_disable();
-	if (unlikely(lazy_irq_pending())) {
+	if (unlikely(lazy_irq_pending_nocheck())) {
 		__hard_RI_enable();
 		trace_hardirqs_off();
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -204,8 +206,6 @@ again:
 	local_paca->tm_scratch = regs->msr;
 #endif
 
-	kuap_check_amr();
-
 	account_cpu_user_exit();
 
 	return ret;
@@ -228,6 +228,8 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
 
+	kuap_check_amr();
+
 	local_irq_save(flags);
 
 again:
@@ -264,7 +266,7 @@ again:
 
 		trace_hardirqs_on();
 		__hard_EE_RI_disable();
-		if (unlikely(lazy_irq_pending())) {
+		if (unlikely(lazy_irq_pending_nocheck())) {
 			__hard_RI_enable();
 			trace_hardirqs_off();
 			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -292,8 +294,6 @@ again:
 	local_paca->tm_scratch = regs->msr;
 #endif
 
-	kuap_check_amr();
-
 	account_cpu_user_exit();
 
 	return ret;
@@ -313,6 +313,8 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	BUG_ON(regs->msr & MSR_PR);
 	BUG_ON(!FULL_REGS(regs));
 
+	kuap_check_amr();
+
 	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
 		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
 		ret = 1;
@@ -334,7 +336,7 @@ again:
 
 		trace_hardirqs_on();
 		__hard_EE_RI_disable();
-		if (unlikely(lazy_irq_pending())) {
+		if (unlikely(lazy_irq_pending_nocheck())) {
 			__hard_RI_enable();
 			irq_soft_mask_set(IRQS_ALL_DISABLED);
 			trace_hardirqs_off();
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index a3951567118a..e7f8f9f1b3f4 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -218,11 +218,11 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
 	blr
 
 	/*
-	 * invalid clock
+	 * syscall fallback
 	 */
 99:
-	li	r3, EINVAL
-	crset	so
+	li	r0,__NR_clock_getres
+	sc
 	blr
 	.cfi_endproc
 V_FUNCTION_END(__kernel_clock_getres)
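With the vdso32 change, __kernel_clock_getres() no longer claims EINVAL for clock ids its fast path does not implement; it falls through to the real clock_getres syscall and returns whatever the kernel decides. Observable from userspace roughly like this (illustrative program; CLOCK_BOOTTIME is assumed to be one of the ids the 32-bit vDSO fast path does not handle):

        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                struct timespec ts;

                /* Previously failed with EINVAL via the ppc32 vDSO;
                 * now answered by the kernel through the syscall fallback. */
                if (clock_getres(CLOCK_BOOTTIME, &ts) == 0)
                        printf("resolution: %ld ns\n", (long)ts.tv_nsec);
                return 0;
        }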
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 6404df613ea3..2b35f9bcf892 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -604,18 +604,19 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	pte = __pte(0);
+	if (ptep)
+		pte = *ptep;
+	local_irq_enable();
 	/*
 	 * If the PTE disappeared temporarily due to a THP
 	 * collapse, just return and let the guest try again.
 	 */
-	if (!ptep) {
-		local_irq_enable();
+	if (!pte_present(pte)) {
 		if (page)
 			put_page(page);
 		return RESUME_GUEST;
 	}
-	pte = *ptep;
-	local_irq_enable();
 	hpa = pte_pfn(pte) << PAGE_SHIFT;
 	pte_size = PAGE_SIZE;
 	if (shift)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 9f050064d2a2..aa12cd4078b3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -815,18 +815,19 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	pte = __pte(0);
+	if (ptep)
+		pte = *ptep;
+	local_irq_enable();
 	/*
 	 * If the PTE disappeared temporarily due to a THP
 	 * collapse, just return and let the guest try again.
 	 */
-	if (!ptep) {
-		local_irq_enable();
+	if (!pte_present(pte)) {
 		if (page)
 			put_page(page);
 		return RESUME_GUEST;
 	}
-	pte = *ptep;
-	local_irq_enable();
 
 	/* If we're logging dirty pages, always map single pages */
 	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
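Both KVM hunks fix the same race: the PTE value must be read while interrupts are disabled, because a concurrent THP collapse can free the page table as soon as the IRQ-disabled window ends, making the old dereference of ptep after local_irq_enable() a use-after-free. The resulting pattern, extracted as a sketch:

        pte_t pte = __pte(0);
        pte_t *ptep;

        local_irq_disable();
        ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
        if (ptep)
                pte = *ptep;		/* dereference only while IRQs are off */
        local_irq_enable();

        if (!pte_present(pte))
                return RESUME_GUEST;	/* raced with THP collapse: guest retries */

Only the copied value is used after the window closes, and the pte_present() check also covers a PTE that was found but already cleared.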
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index e15166b0a16d..ad2f172c26a6 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -521,6 +521,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_IMMEDIATE_EXIT:
+	case KVM_CAP_SET_GUEST_DEBUG:
 		r = 1;
 		break;
 	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 7e5714a69a58..8ed2411c3f39 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -809,7 +809,8 @@ int resize_hpt_for_hotplug(unsigned long new_mem_size)
 	return 0;
 }
 
-int hash__create_section_mapping(unsigned long start, unsigned long end, int nid)
+int hash__create_section_mapping(unsigned long start, unsigned long end,
+				 int nid, pgprot_t prot)
 {
 	int rc;
 
@@ -819,7 +820,7 @@ int hash__create_section_mapping(unsigned long start, unsigned long end, int nid
 	}
 
 	rc = htab_bolt_mapping(start, end, __pa(start),
-			       pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+			       pgprot_val(prot), mmu_linear_psize,
 			       mmu_kernel_ssize);
 
 	if (rc < 0) {
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 2bf7e1b4fd82..e0bb69c616e4 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -171,12 +171,13 @@ void mmu_cleanup_all(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
+int __meminit create_section_mapping(unsigned long start, unsigned long end,
+				     int nid, pgprot_t prot)
 {
 	if (radix_enabled())
-		return radix__create_section_mapping(start, end, nid);
+		return radix__create_section_mapping(start, end, nid, prot);
 
-	return hash__create_section_mapping(start, end, nid);
+	return hash__create_section_mapping(start, end, nid, prot);
 }
 
 int __meminit remove_section_mapping(unsigned long start, unsigned long end)
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 07527f1ed108..1199fc2bfaec 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -315,7 +315,7 @@ int __execute_only_pkey(struct mm_struct *mm)
 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
 {
 	/* Do this check first since the vm_flags should be hot */
-	if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
+	if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
 		return false;
 
 	return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 2a9a0cd79490..8f9edf07063a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -254,7 +254,7 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
 
 static int __meminit create_physical_mapping(unsigned long start,
 					     unsigned long end,
-					     int nid)
+					     int nid, pgprot_t _prot)
 {
 	unsigned long vaddr, addr, mapping_size = 0;
 	bool prev_exec, exec = false;
@@ -290,7 +290,7 @@ static int __meminit create_physical_mapping(unsigned long start,
 			prot = PAGE_KERNEL_X;
 			exec = true;
 		} else {
-			prot = PAGE_KERNEL;
+			prot = _prot;
 			exec = false;
 		}
 
@@ -334,7 +334,7 @@ static void __init radix_init_pgtable(void)
 
 		WARN_ON(create_physical_mapping(reg->base,
 						reg->base + reg->size,
-						-1));
+						-1, PAGE_KERNEL));
 	}
 
 	/* Find out how many PID bits are supported */
@@ -713,8 +713,10 @@ static int __meminit stop_machine_change_mapping(void *data)
 	spin_unlock(&init_mm.page_table_lock);
 	pte_clear(&init_mm, params->aligned_start, params->pte);
-	create_physical_mapping(__pa(params->aligned_start), __pa(params->start), -1);
-	create_physical_mapping(__pa(params->end), __pa(params->aligned_end), -1);
+	create_physical_mapping(__pa(params->aligned_start),
+				__pa(params->start), -1, PAGE_KERNEL);
+	create_physical_mapping(__pa(params->end), __pa(params->aligned_end),
+				-1, PAGE_KERNEL);
 	spin_lock(&init_mm.page_table_lock);
 	return 0;
 }
@@ -871,14 +873,16 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 	radix__flush_tlb_kernel_range(start, end);
 }
 
-int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
+int __meminit radix__create_section_mapping(unsigned long start,
+					    unsigned long end, int nid,
+					    pgprot_t prot)
 {
 	if (end >= RADIX_VMALLOC_START) {
 		pr_warn("Outside the supported range\n");
 		return -1;
 	}
 
-	return create_physical_mapping(__pa(start), __pa(end), nid);
+	return create_physical_mapping(__pa(start), __pa(end), nid, prot);
 }
 
 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 9b4f5fb719e0..041ed7cfd341 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -90,7 +90,8 @@ int memory_add_physaddr_to_nid(u64 start)
 }
 #endif
 
-int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
+int __weak create_section_mapping(unsigned long start, unsigned long end,
+				  int nid, pgprot_t prot)
 {
 	return -ENODEV;
 }
@@ -122,7 +123,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
 }
 
 int __ref arch_add_memory(int nid, u64 start, u64 size,
-			  struct mhp_restrictions *restrictions)
+			  struct mhp_params *params)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -131,14 +132,15 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
 	resize_hpt_for_hotplug(memblock_phys_mem_size());
 
 	start = (unsigned long)__va(start);
-	rc = create_section_mapping(start, start + size, nid);
+	rc = create_section_mapping(start, start + size, nid,
+				    params->pgprot);
 	if (rc) {
 		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
 			start, start + size, rc);
 		return -EFAULT;
 	}
 
-	return __add_pages(nid, start_pfn, nr_pages, restrictions);
+	return __add_pages(nid, start_pfn, nr_pages, params);
 }
 
 void __ref arch_remove_memory(int nid, u64 start, u64 size,
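The thread running through sparsemem.h, hash_utils.c, pgtable.c, radix_pgtable.c and mem.c is a single change: arch_add_memory() now receives struct mhp_params and passes params->pgprot down to the hash or radix linear-map code instead of hardcoding PAGE_KERNEL, so hotplug callers can request a different protection. A hypothetical caller (sketch; generic code normally reaches arch_add_memory() via add_memory_resource() and friends):

        struct mhp_params params = {
                .pgprot = PAGE_KERNEL,	/* default; a stricter or uncached
                			   protection would flow through the
                			   same path */
        };

        rc = arch_add_memory(nid, start, size, &params);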
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 3189308dece4..d83a12c5bc7f 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -185,6 +185,7 @@ void mmu_mark_initmem_nx(void)
 			mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
 		}
 	}
+	_tlbil_all();
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
@@ -199,6 +200,8 @@ void mmu_mark_rodata_ro(void)
 			      ~(LARGE_PAGE_SIZE_8M - 1)));
 	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
 
+	_tlbil_all();
+
 	/* Update page tables for PTDUMP and BDI */
 	mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
 	mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 0c3c1902135c..27a81c291be8 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -397,7 +397,7 @@ config PPC_KUAP
 
 config PPC_KUAP_DEBUG
 	bool "Extra debugging for Kernel Userspace Access Protection"
-	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC_32)
+	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
 	help
 	  Add extra debugging for Kernel Userspace Access Protection (KUAP)
 	  If you're unsure, say N.