Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/interrupt.h        6
-rw-r--r--  arch/powerpc/include/asm/mman.h             2
-rw-r--r--  arch/powerpc/include/asm/vdso/getrandom.h  54
-rw-r--r--  arch/powerpc/include/asm/vdso/vsyscall.h    6
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h   17
5 files changed, 81 insertions, 4 deletions
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 2d6c886b40f4..23638d4e73ac 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -177,7 +177,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
	if (user_mode(regs)) {
		kuap_lock();
-		CT_WARN_ON(ct_state() != CONTEXT_USER);
+		CT_WARN_ON(ct_state() != CT_STATE_USER);
		user_exit_irqoff();
		account_cpu_user_entry();
@@ -189,8 +189,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM)
-			CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
-				   ct_state() != CONTEXT_IDLE);
+			CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
+				   ct_state() != CT_STATE_IDLE);
		INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
		INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
				     search_kernel_restart_table(regs->nip));
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 17a77d47ed6d..42a51a993d94 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -6,7 +6,7 @@
#include <uapi/asm/mman.h>
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && !defined(BUILD_VDSO)
#include <asm/cputable.h>
#include <linux/mm.h>
diff --git a/arch/powerpc/include/asm/vdso/getrandom.h b/arch/powerpc/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..501d6bb14e8a
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/getrandom.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France
+ */
+#ifndef _ASM_POWERPC_VDSO_GETRANDOM_H
+#define _ASM_POWERPC_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+static __always_inline int do_syscall_3(const unsigned long _r0, const unsigned long _r3,
+					 const unsigned long _r4, const unsigned long _r5)
+{
+	register long r0 asm("r0") = _r0;
+	register unsigned long r3 asm("r3") = _r3;
+	register unsigned long r4 asm("r4") = _r4;
+	register unsigned long r5 asm("r5") = _r5;
+	register int ret asm ("r3");
+
+	asm volatile(
+		" sc\n"
+		" bns+ 1f\n"
+		" neg %0, %0\n"
+		"1:\n"
+		: "=r" (ret), "+r" (r4), "+r" (r5), "+r" (r0)
+		: "r" (r3)
+		: "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");
+
+	return ret;
+}
+
+/**
+ * getrandom_syscall - Invoke the getrandom() syscall.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * Returns: The number of bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags)
+{
+	return do_syscall_3(__NR_getrandom, (unsigned long)buffer,
+			    (unsigned long)len, (unsigned long)flags);
+}
+
+static __always_inline struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+	return NULL;
+}
+
+ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state,
+			     size_t opaque_len, const struct vdso_rng_data *vd);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_GETRANDOM_H */
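
The syscall fallback introduced above follows the powerpc system call convention: `sc` returns its result in r3 and signals failure through the summary-overflow bit of cr0, so the `bns+ 1f` / `neg %0, %0` pair in do_syscall_3() skips the negation on success and otherwise turns the positive error code into the usual negative-errno return value. A small illustrative caller of the fallback (not part of the patch; only getrandom_syscall() comes from the header above, the errno constants are from errno.h):

	/* Illustrative only: fill a buffer via the syscall fallback, retrying on -EINTR. */
	static int fill_with_random(unsigned char *buf, size_t len)
	{
		size_t filled = 0;

		while (filled < len) {
			ssize_t n = getrandom_syscall(buf + filled, len - filled, 0);

			if (n == -EINTR)
				continue;	/* interrupted by a signal, retry */
			if (n < 0)
				return (int)n;	/* some other negative errno, e.g. -EFAULT */
			filled += n;
		}

		return 0;
	}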
diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h
index 48cf23f1e273..92f480d8cc6d 100644
--- a/arch/powerpc/include/asm/vdso/vsyscall.h
+++ b/arch/powerpc/include/asm/vdso/vsyscall.h
@@ -17,6 +17,12 @@ struct vdso_data *__arch_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __arch_get_k_vdso_data
+static __always_inline
+struct vdso_rng_data *__arch_get_k_vdso_rng_data(void)
+{
+	return &vdso_data->rng_data;
+}
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
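
Together with the new getrandom.h above, this shows the powerpc split between the two sides of the shared rng data: the kernel writes it through __arch_get_k_vdso_rng_data(), i.e. the rng_data member added to vdso_arch_data below, while the vDSO side cannot use a global accessor (hence __arch_get_vdso_rng_data() returning NULL) and instead has the assembly entry point locate the data page PC-relatively and pass the pointer as the trailing `vd` argument of __c_kernel_getrandom(). A rough C rendering of that call shape, for illustration only (the real entry point is assembly, and the lookup helper named here is hypothetical):

	/* Hypothetical stand-in for the PC-relative get_realdatapage lookup. */
	extern const struct vdso_rng_data *rng_data_from_datapage(void);

	static ssize_t vdso_getrandom_sketch(void *buffer, size_t len, unsigned int flags,
					     void *opaque_state, size_t opaque_len)
	{
		/* The trailing argument stands in for the NULL __arch_get_vdso_rng_data(). */
		return __c_kernel_getrandom(buffer, len, flags, opaque_state, opaque_len,
					    rng_data_from_datapage());
	}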
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index a585c8e538ff..248dee138f7b 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -83,6 +83,7 @@ struct vdso_arch_data {
	__u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */
	struct vdso_data data[CS_BASES];
+	struct vdso_rng_data rng_data;
};
#else /* CONFIG_PPC64 */
@@ -95,6 +96,7 @@ struct vdso_arch_data {
	__u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
	__u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
	struct vdso_data data[CS_BASES];
+	struct vdso_rng_data rng_data;
};
#endif /* CONFIG_PPC64 */
@@ -111,6 +113,21 @@ extern struct vdso_arch_data *vdso_data;
	addi \ptr, \ptr, (_vdso_datapage - 999b)@l
.endm
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+
+.macro get_realdatapage ptr scratch
+	get_datapage \ptr
+#ifdef CONFIG_TIME_NS
+	lwz \scratch, VDSO_CLOCKMODE_OFFSET(\ptr)
+	xoris \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@h
+	xori \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@l
+	cntlzw \scratch, \scratch
+	rlwinm \scratch, \scratch, PAGE_SHIFT - 5, 1 << PAGE_SHIFT
+	add \ptr, \ptr, \scratch
+#endif
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
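
The get_realdatapage macro added at the end adjusts the data page pointer without a branch. When CONFIG_TIME_NS is enabled, lwz/xoris/xori compare the page's stored clock mode against VDSO_CLOCKMODE_TIMENS, cntlzw then yields 32 only when they were equal, and the rlwinm turns that 32 into PAGE_SIZE (anything smaller becomes 0), so the pointer is advanced by one page exactly when the mapped page is the time-namespace one. The same computation in C, as a sketch only (the constants below are placeholders; the real values come from asm-offsets.h, asm/page.h and vdso/clocksource.h):

	#define SK_PAGE_SHIFT	12		/* assumption: 4K pages */
	#define SK_PAGE_SIZE	(1UL << SK_PAGE_SHIFT)
	#define SK_TIMENS_MODE	0x7fffffffU	/* placeholder for VDSO_CLOCKMODE_TIMENS */

	/* Branchless page selection equivalent to get_realdatapage's arithmetic. */
	static void *real_datapage_sketch(void *page, unsigned int clock_mode)
	{
		unsigned int x = clock_mode ^ SK_TIMENS_MODE;	/* 0 only on the timens page */
		unsigned int lz = x ? __builtin_clz(x) : 32;	/* cntlzw counts leading zeros */
		unsigned long off = ((unsigned long)lz << (SK_PAGE_SHIFT - 5)) & SK_PAGE_SIZE;

		return (unsigned char *)page + off;		/* +PAGE_SIZE or +0 */
	}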