Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild               |   1
-rw-r--r--  include/asm-generic/audit_change_attr.h  |   6
-rw-r--r--  include/asm-generic/delay.h              |  96
-rw-r--r--  include/asm-generic/uaccess.h            |   2
-rw-r--r--  include/asm-generic/unaligned.h          | 146
-rw-r--r--  include/asm-generic/vdso/vsyscall.h      |   3
-rw-r--r--  include/asm-generic/vmlinux.lds.h        |   4
7 files changed, 81 insertions, 177 deletions
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 620b6da429d4..1b43c3a77012 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -58,7 +58,6 @@ mandatory-y += tlbflush.h
 mandatory-y += topology.h
 mandatory-y += trace_clock.h
 mandatory-y += uaccess.h
-mandatory-y += unaligned.h
 mandatory-y += vermagic.h
 mandatory-y += vga.h
 mandatory-y += video.h
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index 331670807cf0..cc840537885f 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -11,9 +11,15 @@ __NR_lchown,
 __NR_fchown,
 #endif
 __NR_setxattr,
+#ifdef __NR_setxattrat
+__NR_setxattrat,
+#endif
 __NR_lsetxattr,
 __NR_fsetxattr,
 __NR_removexattr,
+#ifdef __NR_removexattrat
+__NR_removexattrat,
+#endif
 __NR_lremovexattr,
 __NR_fremovexattr,
 #ifdef __NR_fchownat
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
index e448ac61430c..76cf237b6e4c 100644
--- a/include/asm-generic/delay.h
+++ b/include/asm-generic/delay.h
@@ -2,6 +2,9 @@
 #ifndef __ASM_GENERIC_DELAY_H
 #define __ASM_GENERIC_DELAY_H
 
+#include <linux/math.h>
+#include <vdso/time64.h>
+
 /* Undefined functions to get compile-time errors */
 extern void __bad_udelay(void);
 extern void __bad_ndelay(void);
@@ -12,34 +15,73 @@ extern void __const_udelay(unsigned long xloops);
 extern void __delay(unsigned long loops);
 
 /*
- * The weird n/20000 thing suppresses a "comparison is always false due to
- * limited range of data type" warning with non-const 8-bit arguments.
+ * The microseconds/nanosecond delay multiplicators are used to convert a
+ * constant microseconds/nanoseconds value to a value which can be used by the
+ * architectures specific implementation to transform it into loops.
+ */
+#define UDELAY_CONST_MULT	((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC))
+#define NDELAY_CONST_MULT	((unsigned long)DIV_ROUND_UP(1ULL << 32, NSEC_PER_SEC))
+
+/*
+ * The maximum constant udelay/ndelay value picked out of thin air to prevent
+ * too long constant udelays/ndelays.
  */
+#define DELAY_CONST_MAX	20000
 
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n)						\
-	({							\
-		if (__builtin_constant_p(n)) {			\
-			if ((n) / 20000 >= 1)			\
-				__bad_udelay();			\
-			else					\
-				__const_udelay((n) * 0x10c7ul);	\
-		} else {					\
-			__udelay(n);				\
-		}						\
-	})
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n)						\
-	({							\
-		if (__builtin_constant_p(n)) {			\
-			if ((n) / 20000 >= 1)			\
-				__bad_ndelay();			\
-			else					\
-				__const_udelay((n) * 5ul);	\
-		} else {					\
-			__ndelay(n);				\
-		}						\
-	})
+/**
+ * udelay - Inserting a delay based on microseconds with busy waiting
+ * @usec:	requested delay in microseconds
+ *
+ * When delaying in an atomic context ndelay(), udelay() and mdelay() are the
+ * only valid variants of delaying/sleeping to go with.
+ *
+ * When inserting delays in non atomic context which are shorter than the time
+ * which is required to queue e.g. an hrtimer and to enter then the scheduler,
+ * it is also valuable to use udelay(). But it is not simple to specify a
+ * generic threshold for this which will fit for all systems. An approximation
+ * is a threshold for all delays up to 10 microseconds.
+ *
+ * When having a delay which is larger than the architecture specific
+ * %MAX_UDELAY_MS value, please make sure mdelay() is used. Otherwise a overflow
+ * risk is given.
+ *
+ * Please note that ndelay(), udelay() and mdelay() may return early for several
+ * reasons (https://lists.openwall.net/linux-kernel/2011/01/09/56):
+ *
+ * #. computed loops_per_jiffy too low (due to the time taken to execute the
+ *    timer interrupt.)
+ * #. cache behaviour affecting the time it takes to execute the loop function.
+ * #. CPU clock rate changes.
+ */
+static __always_inline void udelay(unsigned long usec)
+{
+	if (__builtin_constant_p(usec)) {
+		if (usec >= DELAY_CONST_MAX)
+			__bad_udelay();
+		else
+			__const_udelay(usec * UDELAY_CONST_MULT);
+	} else {
+		__udelay(usec);
+	}
+}
+
+/**
+ * ndelay - Inserting a delay based on nanoseconds with busy waiting
+ * @nsec:	requested delay in nanoseconds
+ *
+ * See udelay() for basic information about ndelay() and it's variants.
+ */
+static __always_inline void ndelay(unsigned long nsec)
+{
+	if (__builtin_constant_p(nsec)) {
+		if (nsec >= DELAY_CONST_MAX)
+			__bad_udelay();
+		else
+			__const_udelay(nsec * NDELAY_CONST_MULT);
+	} else {
+		__udelay(nsec);
+	}
+}
+#define ndelay(x) ndelay(x)
 
 #endif /* __ASM_GENERIC_DELAY_H */
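The constant-argument path above pre-scales a fixed microsecond or nanosecond count into an "xloops" value at compile time, so the architecture's __const_udelay() is left with a single multiply and shift. The following standalone sketch of that arithmetic is user-space C and not taken from the commit; the loops-per-second figure and the final multiply-and-shift step are assumptions about what a typical __const_udelay() (roughly loops_per_jiffy * HZ based) ends up doing.

/*
 * Sketch of the fixed-point trick behind UDELAY_CONST_MULT: pre-scale the
 * microsecond count by 2^32 / USEC_PER_SEC, then let the "arch" side multiply
 * by its loops-per-second estimate and shift right by 32.
 */
#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC		1000000ULL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define UDELAY_CONST_MULT	((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC))

int main(void)
{
	uint64_t lps = 2400000000ULL;	/* pretend loops per second (illustrative) */
	uint64_t usec = 100;		/* a constant udelay(100) */

	/* compile-time path: microseconds -> pre-scaled xloops */
	uint64_t xloops = usec * UDELAY_CONST_MULT;

	/* assumed __const_udelay() step: xloops -> busy-wait loop count */
	uint64_t loops = (xloops * lps) >> 32;

	printf("exact:  %llu loops\n", (unsigned long long)(usec * lps / USEC_PER_SEC));
	printf("approx: %llu loops\n", (unsigned long long)loops);
	return 0;
}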
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index a5be9e61a2a2..b276f783494c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -11,7 +11,7 @@
 #include <asm-generic/access_ok.h>
 
 #ifdef CONFIG_UACCESS_MEMCPY
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 static __always_inline int
 __get_user_fn(size_t size, const void __user *from, void *to)
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
deleted file mode 100644
index 95acdd70b3b2..000000000000
--- a/include/asm-generic/unaligned.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_UNALIGNED_H
-#define __ASM_GENERIC_UNALIGNED_H
-
-/*
- * This is the most generic implementation of unaligned accesses
- * and should work almost anywhere.
- */
-#include <linux/unaligned/packed_struct.h>
-#include <asm/byteorder.h>
-#include <vdso/unaligned.h>
-
-#define get_unaligned(ptr)	__get_unaligned_t(typeof(*(ptr)), (ptr))
-#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
-
-static inline u16 get_unaligned_le16(const void *p)
-{
-	return le16_to_cpu(__get_unaligned_t(__le16, p));
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
-	return le32_to_cpu(__get_unaligned_t(__le32, p));
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
-	return le64_to_cpu(__get_unaligned_t(__le64, p));
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
-	__put_unaligned_t(__le16, cpu_to_le16(val), p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
-	__put_unaligned_t(__le32, cpu_to_le32(val), p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
-	__put_unaligned_t(__le64, cpu_to_le64(val), p);
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
-	return be16_to_cpu(__get_unaligned_t(__be16, p));
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
-	return be32_to_cpu(__get_unaligned_t(__be32, p));
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
-	return be64_to_cpu(__get_unaligned_t(__be64, p));
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
-	__put_unaligned_t(__be16, cpu_to_be16(val), p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
-	__put_unaligned_t(__be32, cpu_to_be32(val), p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
-	__put_unaligned_t(__be64, cpu_to_be64(val), p);
-}
-
-static inline u32 __get_unaligned_be24(const u8 *p)
-{
-	return p[0] << 16 | p[1] << 8 | p[2];
-}
-
-static inline u32 get_unaligned_be24(const void *p)
-{
-	return __get_unaligned_be24(p);
-}
-
-static inline u32 __get_unaligned_le24(const u8 *p)
-{
-	return p[0] | p[1] << 8 | p[2] << 16;
-}
-
-static inline u32 get_unaligned_le24(const void *p)
-{
-	return __get_unaligned_le24(p);
-}
-
-static inline void __put_unaligned_be24(const u32 val, u8 *p)
-{
-	*p++ = (val >> 16) & 0xff;
-	*p++ = (val >> 8) & 0xff;
-	*p++ = val & 0xff;
-}
-
-static inline void put_unaligned_be24(const u32 val, void *p)
-{
-	__put_unaligned_be24(val, p);
-}
-
-static inline void __put_unaligned_le24(const u32 val, u8 *p)
-{
-	*p++ = val & 0xff;
-	*p++ = (val >> 8) & 0xff;
-	*p++ = (val >> 16) & 0xff;
-}
-
-static inline void put_unaligned_le24(const u32 val, void *p)
-{
-	__put_unaligned_le24(val, p);
-}
-
-static inline void __put_unaligned_be48(const u64 val, u8 *p)
-{
-	*p++ = (val >> 40) & 0xff;
-	*p++ = (val >> 32) & 0xff;
-	*p++ = (val >> 24) & 0xff;
-	*p++ = (val >> 16) & 0xff;
-	*p++ = (val >> 8) & 0xff;
-	*p++ = val & 0xff;
-}
-
-static inline void put_unaligned_be48(const u64 val, void *p)
-{
-	__put_unaligned_be48(val, p);
-}
-
-static inline u64 __get_unaligned_be48(const u8 *p)
-{
-	return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
-		p[3] << 16 | p[4] << 8 | p[5];
-}
-
-static inline u64 get_unaligned_be48(const void *p)
-{
-	return __get_unaligned_be48(p);
-}
-
-#endif /* __ASM_GENERIC_UNALIGNED_H */
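The 24- and 48-bit accessors removed with this header are plain byte-by-byte shifts, and the same interface is now reached through <linux/unaligned.h>, as the uaccess.h hunk above shows. A minimal user-space sketch of two of them follows; types and names are simplified stdint equivalents, not the kernel definitions.

/*
 * Standalone mirror of get_unaligned_le24()/put_unaligned_be24() from the
 * deleted header, using stdint types instead of u8/u32.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint32_t get_unaligned_le24(const void *ptr)
{
	const uint8_t *p = ptr;

	return p[0] | p[1] << 8 | p[2] << 16;
}

static inline void put_unaligned_be24(uint32_t val, void *ptr)
{
	uint8_t *p = ptr;

	*p++ = (val >> 16) & 0xff;
	*p++ = (val >> 8) & 0xff;
	*p++ = val & 0xff;
}

int main(void)
{
	uint8_t buf[3];

	put_unaligned_be24(0x123456, buf);	/* stores 0x12 0x34 0x56 */
	printf("be24 bytes: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
	printf("le24 read:  0x%06x\n", (unsigned)get_unaligned_le24(buf));	/* 0x563412 */
	return 0;
}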
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index c835607f78ae..01dafd604188 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -12,8 +12,7 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
 #endif /* __arch_get_k_vdso_data */
 
 #ifndef __arch_update_vsyscall
-static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
-						   struct timekeeper *tk)
+static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata)
 {
 }
 #endif /* __arch_update_vsyscall */
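The #ifndef/#endif wrapper above is the usual asm-generic override hook: the empty stub is only compiled when an architecture has not already supplied its own __arch_update_vsyscall(). A standalone user-space sketch of that pattern, with made-up names and assuming the common convention of #define'ing the hook to its own name:

/*
 * Sketch of the override-hook pattern: the "generic" part provides a fallback
 * only if the "arch" part did not define the hook first.
 */
#include <stdio.h>

/* what an "arch" header would do before including the generic one */
static inline void arch_hook(void) { printf("arch-specific hook\n"); }
#define arch_hook arch_hook

/* what the generic header then does: provide a stub only if needed */
#ifndef arch_hook
static inline void arch_hook(void) { /* empty fallback */ }
#endif

int main(void)
{
	arch_hook();	/* runs the arch version; the stub was skipped */
	return 0;
}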
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 19ec49a9179b..eeadbaeccf88 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -919,6 +919,10 @@
 
 #define RUNTIME_CONST(t,x)	NAMED_SECTION(runtime_##t##_##x)
 
+#define RUNTIME_CONST_VARIABLES					\
+		RUNTIME_CONST(shift, d_hash_shift)		\
+		RUNTIME_CONST(ptr, dentry_hashtable)
+
 /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
 #define KUNIT_TABLE()						\
 		. = ALIGN(8);					\
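For reference, RUNTIME_CONST_VARIABLES only groups two RUNTIME_CONST() entries, and RUNTIME_CONST(t,x) pastes its arguments into a single section name, as the definition above shows. The short user-space sketch below just prints the resulting names; RUNTIME_CONST_NAME is an illustrative stand-in, since the real macro emits linker-script sections rather than strings.

/* Sketch of the name pasting done by RUNTIME_CONST(t,x) */
#include <stdio.h>

#define RUNTIME_CONST_NAME(t, x)	"runtime_" #t "_" #x

int main(void)
{
	/* the two entries grouped by RUNTIME_CONST_VARIABLES */
	printf("%s\n", RUNTIME_CONST_NAME(shift, d_hash_shift));
	printf("%s\n", RUNTIME_CONST_NAME(ptr, dentry_hashtable));
	return 0;
}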