From dfe564045c653d9e6969ccca57a8a04771d333f7 Mon Sep 17 00:00:00 2001 From: chao Date: Sun, 30 Aug 2020 23:41:17 -0700 Subject: rcu: Panic after fixed number of stalls Some stalls are transient, and the system fully recovers from them. This commit therefore allows users to configure the number of stalls that must occur before a kernel panic is triggered. Signed-off-by: chao Signed-off-by: Paul E. McKenney --- include/linux/kernel.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux/kernel.h') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2f05e9128201..4b5fd3da5fe8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -536,6 +536,7 @@ extern int panic_on_warn; extern unsigned long panic_on_taint; extern bool panic_on_taint_nousertaint; extern int sysctl_panic_on_rcu_stall; +extern int sysctl_max_rcu_stall_to_panic; extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; -- cgit From 74d862b682f51e45d25b95b1ecf212428a4967b0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 18 Nov 2020 20:48:42 +0100 Subject: sched: Make migrate_disable/enable() independent of RT Now that the scheduler can deal with migrate disable properly, there is no real compelling reason to make it only available for RT. There are quite a few code paths which needlessly disable preemption in order to prevent migration, and some constructs like kmap_atomic() enforce it implicitly. Making it available independent of RT makes it possible to provide a preemptible variant of kmap_atomic() and makes the code more consistent in general. Signed-off-by: Thomas Gleixner Grudgingly-Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20201118204007.269943012@linutronix.de --- include/linux/kernel.h | 21 ++++++++++++++------- include/linux/preempt.h | 38 +++----------------------------------- include/linux/sched.h | 2 +- kernel/sched/core.c | 45 +++++++++++++++++++++++++++++++++++---------- kernel/sched/sched.h | 4 ++-- lib/smp_processor_id.c | 2 +- 6 files changed, 56 insertions(+), 56 deletions(-) (limited to 'include/linux/kernel.h') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2f05e9128201..665837f9a831 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -204,6 +204,7 @@ extern int _cond_resched(void); extern void ___might_sleep(const char *file, int line, int preempt_offset); extern void __might_sleep(const char *file, int line, int preempt_offset); extern void __cant_sleep(const char *file, int line, int preempt_offset); +extern void __cant_migrate(const char *file, int line); /** * might_sleep - annotation for functions that can sleep @@ -227,6 +228,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define cant_sleep() \ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) + +/** + * cant_migrate - annotation for functions that cannot migrate + * + * Will print a stack trace if executed in code which is migratable + */ +# define cant_migrate() \ do { \ if (IS_ENABLED(CONFIG_SMP)) \ __cant_migrate(__FILE__, __LINE__); \ } while (0) + /** * non_block_start - annotate the start of section where sleeping is prohibited * @@ -251,6 +264,7 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) # define cant_sleep() do { } while (0) +# define cant_migrate() do { } while (0) # define sched_annotate_sleep() do { } while (0) 
# define non_block_start() do { } while (0) # define non_block_end() do { } while (0) @@ -258,13 +272,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) -#ifndef CONFIG_PREEMPT_RT -# define cant_migrate() cant_sleep() -#else - /* Placeholder for now */ -# define cant_migrate() do { } while (0) -#endif - /** * abs - return absolute value of an argument * @x: the value. If it is unsigned type, it is converted to signed type first. diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 8b43922e65df..6df63cbe8bb0 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -322,7 +322,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier, #endif -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_SMP /* * Migrate-Disable and why it is undesired. @@ -382,43 +382,11 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier, extern void migrate_disable(void); extern void migrate_enable(void); -#elif defined(CONFIG_PREEMPT_RT) +#else static inline void migrate_disable(void) { } static inline void migrate_enable(void) { } -#else /* !CONFIG_PREEMPT_RT */ - -/** - * migrate_disable - Prevent migration of the current task - * - * Maps to preempt_disable() which also disables preemption. Use - * migrate_disable() to annotate that the intent is to prevent migration, - * but not necessarily preemption. - * - * Can be invoked nested like preempt_disable() and needs the corresponding - * number of migrate_enable() invocations. - */ -static __always_inline void migrate_disable(void) -{ - preempt_disable(); -} - -/** - * migrate_enable - Allow migration of the current task - * - * Counterpart to migrate_disable(). - * - * As migrate_disable() can be invoked nested, only the outermost invocation - * reenables migration. - * - * Currently mapped to preempt_enable(). - */ -static __always_inline void migrate_enable(void) -{ - preempt_enable(); -} - -#endif /* CONFIG_SMP && CONFIG_PREEMPT_RT */ +#endif /* CONFIG_SMP */ #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 3af9d52fe093..a33f35f68060 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -715,7 +715,7 @@ struct task_struct { const cpumask_t *cpus_ptr; cpumask_t cpus_mask; void *migration_pending; -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_SMP unsigned short migration_disabled; #endif unsigned short migration_flags; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e6473ecaab3c..c962922784d1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1728,8 +1728,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) #ifdef CONFIG_SMP -#ifdef CONFIG_PREEMPT_RT - static void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags); @@ -1800,8 +1798,6 @@ static inline bool rq_has_pinned_tasks(struct rq *rq) return rq->nr_pinned; } -#endif - /* * Per-CPU kthreads are allowed to run on !active && online CPUs, see * __set_cpus_allowed_ptr() and select_fallback_rq(). 
@@ -2882,7 +2878,7 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) } } -#else +#else /* CONFIG_SMP */ static inline int __set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask, @@ -2891,10 +2887,6 @@ static inline int __set_cpus_allowed_ptr(struct task_struct *p, return set_cpus_allowed_ptr(p, new_mask); } -#endif /* CONFIG_SMP */ - -#if !defined(CONFIG_SMP) || !defined(CONFIG_PREEMPT_RT) - static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } static inline bool rq_has_pinned_tasks(struct rq *rq) @@ -2902,7 +2894,7 @@ static inline bool rq_has_pinned_tasks(struct rq *rq) return false; } -#endif +#endif /* !CONFIG_SMP */ static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags) @@ -7924,6 +7916,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset) add_taint(TAINT_WARN, LOCKDEP_STILL_OK); } EXPORT_SYMBOL_GPL(__cant_sleep); + +#ifdef CONFIG_SMP +void __cant_migrate(const char *file, int line) +{ + static unsigned long prev_jiffy; + + if (irqs_disabled()) + return; + + if (is_migration_disabled(current)) + return; + + if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) + return; + + if (preempt_count() > 0) + return; + + if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) + return; + prev_jiffy = jiffies; + + pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); + pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", + in_atomic(), irqs_disabled(), is_migration_disabled(current), + current->pid, current->comm); + + debug_show_held_locks(current); + dump_stack(); + add_taint(TAINT_WARN, LOCKDEP_STILL_OK); +} +EXPORT_SYMBOL_GPL(__cant_migrate); +#endif #endif #ifdef CONFIG_MAGIC_SYSRQ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 590e6f27068c..f5acb6c5ce49 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1056,7 +1056,7 @@ struct rq { struct cpuidle_state *idle_state; #endif -#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP) +#ifdef CONFIG_SMP unsigned int nr_pinned; #endif unsigned int push_busy; @@ -1092,7 +1092,7 @@ static inline int cpu_of(struct rq *rq) static inline bool is_migration_disabled(struct task_struct *p) { -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_SMP return p->migration_disabled; #else return false; diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index faaa927ac2c8..1c1dbd300325 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -26,7 +26,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2) if (current->nr_cpus_allowed == 1) goto out; -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_SMP if (current->migration_disabled) goto out; #endif -- cgit From aa6159ab99a9ab5df835b4750b66cf132a5aa292 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 15 Dec 2020 20:42:48 -0800 Subject: kernel.h: split out mathematical helpers kernel.h has been used as a dumping ground for all kinds of stuff for a long time. This is an attempt to start cleaning it up by splitting out the mathematical helpers. At the same time, convert users in the header and lib folders to use the new header. For the time being, the new header is included back into kernel.h to avoid twisted indirect includes for existing users. 
[sfr@canb.auug.org.au: fix powerpc build] Link: https://lkml.kernel.org/r/20201029150809.13059608@canb.auug.org.au Link: https://lkml.kernel.org/r/20201028173212.41768-1-andriy.shevchenko@linux.intel.com Signed-off-by: Andy Shevchenko Cc: "Paul E. McKenney" Cc: Trond Myklebust Cc: Jeff Layton Cc: Rasmus Villemoes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nfs/callback_proc.c | 5 ++ include/linux/bitops.h | 11 ++- include/linux/dcache.h | 1 + include/linux/iommu-helper.h | 4 +- include/linux/kernel.h | 173 +---------------------------------------- include/linux/math.h | 177 ++++++++++++++++++++++++++++++++++++++++++ include/linux/rcu_node_tree.h | 2 + include/linux/units.h | 2 +- lib/errname.c | 1 + lib/errseq.c | 1 + lib/find_bit.c | 3 +- lib/math/div64.c | 4 +- lib/math/int_pow.c | 2 +- lib/math/int_sqrt.c | 3 +- lib/math/reciprocal_div.c | 9 ++- 15 files changed, 215 insertions(+), 183 deletions(-) create mode 100644 include/linux/math.h (limited to 'include/linux/kernel.h') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index e61dbc9b86ae..f7786e00a6a7 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -6,10 +6,15 @@ * * NFSv4 callback procedures */ + +#include +#include #include #include #include #include +#include + #include "nfs4_fs.h" #include "callback.h" #include "delegation.h" diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 5b74bdf159d6..a61f192c096b 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -1,9 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H + #include #include +#include + /* Set bits in the first 'n' bytes when loaded from memory */ #ifdef __LITTLE_ENDIAN # define aligned_byte_mask(n) ((1UL << 8*(n))-1) @@ -12,10 +15,10 @@ #endif #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) -#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u64)) -#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) -#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) +#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) +#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64)) +#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) +#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 6f95c3300cbb..d7b369fc15d3 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -4,6 +4,7 @@ #include #include +#include #include #include #include diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index 70d01edcbf8b..74be34f3a20a 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h @@ -3,7 +3,9 @@ #define _LINUX_IOMMU_HELPER_H #include -#include +#include +#include +#include static inline unsigned long iommu_device_max_index(unsigned long size, unsigned long offset, diff --git a/include/linux/kernel.h b/include/linux/kernel.h index dbf6018fc312..f7902d8c1048 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -2,7 +2,6 @@ #ifndef _LINUX_KERNEL_H #define _LINUX_KERNEL_H - #include #include #include @@ -11,12 +10,14 @@ #include #include #include +#include #include #include #include #include + #include -#include + #include #define STACK_MAGIC 0xdeadbeef @@ -54,125 +55,11 @@ 
} \ ) -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. - */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -/** - * round_up - round up to next specified power of 2 - * @x: the value to round - * @y: multiple to round up to (must be a power of 2) - * - * Rounds @x up to next multiple of @y (which must be a power of 2). - * To perform arbitrary rounding up, use roundup() below. - */ -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -/** - * round_down - round down to next specified power of 2 - * @x: the value to round - * @y: multiple to round down to (must be a power of 2) - * - * Rounds @x down to next multiple of @y (which must be a power of 2). - * To perform arbitrary rounding down, use rounddown() below. - */ -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - #define typeof_member(T, m) typeof(((T*)0)->m) -#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP - -#define DIV_ROUND_DOWN_ULL(ll, d) \ - ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) - -#define DIV_ROUND_UP_ULL(ll, d) \ - DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) - -#if BITS_PER_LONG == 32 -# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) -#else -# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) -#endif - -/** - * roundup - round up to the next specified multiple - * @x: the value to up - * @y: multiple to round up to - * - * Rounds @x up to next multiple of @y. If @y will always be a power - * of 2, consider using the faster round_up(). - */ -#define roundup(x, y) ( \ -{ \ - typeof(y) __y = y; \ - (((x) + (__y - 1)) / __y) * __y; \ -} \ -) -/** - * rounddown - round down to next specified multiple - * @x: the value to round - * @y: multiple to round down to - * - * Rounds @x down to next multiple of @y. If @y will always be a power - * of 2, consider using the faster round_down(). - */ -#define rounddown(x, y) ( \ -{ \ - typeof(x) __x = (x); \ - __x - (__x % (y)); \ -} \ -) - -/* - * Divide positive or negative dividend by positive or negative divisor - * and round to closest integer. Result is undefined for negative - * divisors if the dividend variable type is unsigned and for negative - * dividends if the divisor variable type is unsigned. - */ -#define DIV_ROUND_CLOSEST(x, divisor)( \ -{ \ - typeof(x) __x = x; \ - typeof(divisor) __d = divisor; \ - (((typeof(x))-1) > 0 || \ - ((typeof(divisor))-1) > 0 || \ - (((__x) > 0) == ((__d) > 0))) ? \ - (((__x) + ((__d) / 2)) / (__d)) : \ - (((__x) - ((__d) / 2)) / (__d)); \ -} \ -) -/* - * Same as above but for u64 dividends. divisor must be a 32-bit - * number. - */ -#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ -{ \ - typeof(divisor) __d = divisor; \ - unsigned long long _tmp = (x) + (__d) / 2; \ - do_div(_tmp, __d); \ - _tmp; \ -} \ -) - -/* - * Multiplies an integer by a fraction, while avoiding unnecessary - * overflow or loss of precision. 
- */ -#define mult_frac(x, numer, denom)( \ -{ \ - typeof(x) quot = (x) / (denom); \ - typeof(x) rem = (x) % (denom); \ - (quot * (numer)) + ((rem * (numer)) / (denom)); \ -} \ -) - - #define _RET_IP_ (unsigned long)__builtin_return_address(0) #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) -#define sector_div(a, b) do_div(a, b) - /** * upper_32_bits - return bits 32-63 of a number * @n: the number we're accessing @@ -272,48 +159,6 @@ extern void __cant_migrate(const char *file, int line); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) -/** - * abs - return absolute value of an argument - * @x: the value. If it is unsigned type, it is converted to signed type first. - * char is treated as if it was signed (regardless of whether it really is) - * but the macro's return type is preserved as char. - * - * Return: an absolute value of x. - */ -#define abs(x) __abs_choose_expr(x, long long, \ - __abs_choose_expr(x, long, \ - __abs_choose_expr(x, int, \ - __abs_choose_expr(x, short, \ - __abs_choose_expr(x, char, \ - __builtin_choose_expr( \ - __builtin_types_compatible_p(typeof(x), char), \ - (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ - ((void)0))))))) - -#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ - __builtin_types_compatible_p(typeof(x), signed type) || \ - __builtin_types_compatible_p(typeof(x), unsigned type), \ - ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) - -/** - * reciprocal_scale - "scale" a value into range [0, ep_ro) - * @val: value - * @ep_ro: right open interval endpoint - * - * Perform a "reciprocal multiplication" in order to "scale" a value into - * range [0, @ep_ro), where the upper interval endpoint is right-open. - * This is useful, e.g. for accessing a index of an array containing - * @ep_ro elements, for example. Think of it as sort of modulus, only that - * the result isn't that of modulo. ;) Note that if initial input is a - * small value, then result will return 0. - * - * Return: a result based on @val in interval [0, @ep_ro). - */ -static inline u32 reciprocal_scale(u32 val, u32 ep_ro) -{ - return (u32)(((u64) val * ep_ro) >> 32); -} - #if defined(CONFIG_MMU) && \ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) #define might_fault() __might_fault(__FILE__, __LINE__) @@ -515,18 +360,6 @@ extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); -u64 int_pow(u64 base, unsigned int exp); -unsigned long int_sqrt(unsigned long); - -#if BITS_PER_LONG < 64 -u32 int_sqrt64(u64 x); -#else -static inline u32 int_sqrt64(u64 x) -{ - return (u32)int_sqrt(x); -} -#endif - #ifdef CONFIG_SMP extern unsigned int sysctl_oops_all_cpu_backtrace; #else diff --git a/include/linux/math.h b/include/linux/math.h new file mode 100644 index 000000000000..53674a327e39 --- /dev/null +++ b/include/linux/math.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MATH_H +#define _LINUX_MATH_H + +#include +#include + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. 
+ */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) + +/** + * round_up - round up to next specified power of 2 + * @x: the value to round + * @y: multiple to round up to (must be a power of 2) + * + * Rounds @x up to next multiple of @y (which must be a power of 2). + * To perform arbitrary rounding up, use roundup() below. + */ +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) + +/** + * round_down - round down to next specified power of 2 + * @x: the value to round + * @y: multiple to round down to (must be a power of 2) + * + * Rounds @x down to next multiple of @y (which must be a power of 2). + * To perform arbitrary rounding down, use rounddown() below. + */ +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP + +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) + +#define DIV_ROUND_UP_ULL(ll, d) \ + DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) + +#if BITS_PER_LONG == 32 +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) +#else +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) +#endif + +/** + * roundup - round up to the next specified multiple + * @x: the value to up + * @y: multiple to round up to + * + * Rounds @x up to next multiple of @y. If @y will always be a power + * of 2, consider using the faster round_up(). + */ +#define roundup(x, y) ( \ +{ \ + typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +/** + * rounddown - round down to next specified multiple + * @x: the value to round + * @y: multiple to round down to + * + * Rounds @x down to next multiple of @y. If @y will always be a power + * of 2, consider using the faster round_down(). + */ +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) + +/* + * Divide positive or negative dividend by positive or negative divisor + * and round to closest integer. Result is undefined for negative + * divisors if the dividend variable type is unsigned and for negative + * dividends if the divisor variable type is unsigned. + */ +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) > 0 || \ + ((typeof(divisor))-1) > 0 || \ + (((__x) > 0) == ((__d) > 0))) ? \ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ +} \ +) +/* + * Same as above but for u64 dividends. divisor must be a 32-bit + * number. + */ +#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ +{ \ + typeof(divisor) __d = divisor; \ + unsigned long long _tmp = (x) + (__d) / 2; \ + do_div(_tmp, __d); \ + _tmp; \ +} \ +) + +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +#define mult_frac(x, numer, denom)( \ +{ \ + typeof(x) quot = (x) / (denom); \ + typeof(x) rem = (x) % (denom); \ + (quot * (numer)) + ((rem * (numer)) / (denom)); \ +} \ +) + +#define sector_div(a, b) do_div(a, b) + +/** + * abs - return absolute value of an argument + * @x: the value. If it is unsigned type, it is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. + * + * Return: an absolute value of x. 
+ */ +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) + +/** + * reciprocal_scale - "scale" a value into range [0, ep_ro) + * @val: value + * @ep_ro: right open interval endpoint + * + * Perform a "reciprocal multiplication" in order to "scale" a value into + * range [0, @ep_ro), where the upper interval endpoint is right-open. + * This is useful, e.g. for accessing a index of an array containing + * @ep_ro elements, for example. Think of it as sort of modulus, only that + * the result isn't that of modulo. ;) Note that if initial input is a + * small value, then result will return 0. + * + * Return: a result based on @val in interval [0, @ep_ro). + */ +static inline u32 reciprocal_scale(u32 val, u32 ep_ro) +{ + return (u32)(((u64) val * ep_ro) >> 32); +} + +u64 int_pow(u64 base, unsigned int exp); +unsigned long int_sqrt(unsigned long); + +#if BITS_PER_LONG < 64 +u32 int_sqrt64(u64 x); +#else +static inline u32 int_sqrt64(u64 x) +{ + return (u32)int_sqrt(x); +} +#endif + +#endif /* _LINUX_MATH_H */ diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h index b8e094b125ee..78feb8ba7358 100644 --- a/include/linux/rcu_node_tree.h +++ b/include/linux/rcu_node_tree.h @@ -20,6 +20,8 @@ #ifndef __LINUX_RCU_NODE_TREE_H #define __LINUX_RCU_NODE_TREE_H +#include + /* * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and * CONFIG_RCU_FANOUT_LEAF. diff --git a/include/linux/units.h b/include/linux/units.h index aaf716364ec3..5c115c809507 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -2,7 +2,7 @@ #ifndef _LINUX_UNITS_H #define _LINUX_UNITS_H -#include +#include #define ABSOLUTE_ZERO_MILLICELSIUS -273150 diff --git a/lib/errname.c b/lib/errname.c index 0c4d3e66170e..05cbf731545f 100644 --- a/lib/errname.c +++ b/lib/errname.c @@ -3,6 +3,7 @@ #include #include #include +#include /* * Ensure these tables do not accidentally become gigantic if some diff --git a/lib/errseq.c b/lib/errseq.c index 81f9e33aa7e7..93e9b94358dc 100644 --- a/lib/errseq.c +++ b/lib/errseq.c @@ -3,6 +3,7 @@ #include #include #include +#include /* * An errseq_t is a way of recording errors in one place, and allowing any diff --git a/lib/find_bit.c b/lib/find_bit.c index 4a8751010d59..f67f86fd2f62 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -15,8 +15,9 @@ #include #include #include -#include +#include #include +#include #if !defined(find_next_bit) || !defined(find_next_zero_bit) || \ !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \ diff --git a/lib/math/div64.c b/lib/math/div64.c index 3952a07130d8..064d68a5391a 100644 --- a/lib/math/div64.c +++ b/lib/math/div64.c @@ -18,9 +18,11 @@ * or by defining a preprocessor macro in arch/include/asm/div64.h. 
*/ +#include #include -#include +#include #include +#include /* Not needed on 64bit architectures */ #if BITS_PER_LONG == 32 diff --git a/lib/math/int_pow.c b/lib/math/int_pow.c index 622fc1ab3c74..0cf426e69bda 100644 --- a/lib/math/int_pow.c +++ b/lib/math/int_pow.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include /** diff --git a/lib/math/int_sqrt.c b/lib/math/int_sqrt.c index 30e0f9770f88..a8170bb9142f 100644 --- a/lib/math/int_sqrt.c +++ b/lib/math/int_sqrt.c @@ -6,9 +6,10 @@ * square root from Guy L. Steele. */ -#include #include #include +#include +#include /** * int_sqrt - computes the integer square root diff --git a/lib/math/reciprocal_div.c b/lib/math/reciprocal_div.c index 32436dd4171e..6cb4adbb81d2 100644 --- a/lib/math/reciprocal_div.c +++ b/lib/math/reciprocal_div.c @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include -#include -#include -#include #include +#include +#include #include +#include + +#include /* * For a description of the algorithm please have a look at -- cgit
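
The first commit above only adds the sysctl_max_rcu_stall_to_panic declaration to kernel.h; the logic that consumes it lives in RCU's stall-warning code, which is not part of this log. Below is a minimal sketch of how such a threshold check can be wired up; the function name, its placement and the panic message are illustrative assumptions rather than code quoted from this log.

/* Illustrative sketch only, not the stall-warning code shipped with this commit. */
static void panic_on_rcu_stall(void)
{
	static int stall_count;

	/*
	 * Tolerate transient stalls: only panic once the configured
	 * number of stalls has been observed since boot.
	 */
	if (sysctl_panic_on_rcu_stall &&
	    ++stall_count >= sysctl_max_rcu_stall_to_panic)
		panic("RCU Stall\n");
}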
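
The second commit makes migrate_disable()/migrate_enable() and the cant_migrate() annotation available on all SMP configurations instead of only on PREEMPT_RT. A sketch of the usage pattern this enables follows; struct my_pcpu_state, do_pinned_work() and process_state() are hypothetical names invented for illustration, and the per-CPU data is assumed to be serialized against other tasks on the same CPU by other means, since migrate_disable() does not disable preemption.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU state, for illustration only. */
struct my_pcpu_state {
	unsigned long hits;
};
static DEFINE_PER_CPU(struct my_pcpu_state, my_pcpu_state);

static void process_state(struct my_pcpu_state *s);	/* may sleep */

static void do_pinned_work(void)
{
	struct my_pcpu_state *s;

	migrate_disable();			/* pinned to this CPU, but still preemptible */
	s = this_cpu_ptr(&my_pcpu_state);	/* pointer stays valid: no migration */
	process_state(s);			/* may be preempted, never migrated */
	migrate_enable();
}

static void helper_expecting_pinned_caller(void)
{
	cant_migrate();		/* warns if the caller is still migratable */
	/* ... work that relies on a stable smp_processor_id() ... */
}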
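
The third commit moves the rounding, division and related arithmetic helpers from kernel.h into the new <linux/math.h>, which kernel.h still pulls in for the time being. A few worked uses of the moved helpers are shown below with their results in comments; the input values and the wrapper function are arbitrary examples, not code from this log.

#include <linux/math.h>		/* now the direct home of these helpers */
#include <linux/types.h>

static void math_helper_examples(void)
{
	unsigned long a = round_up(1000, 256);		/* 1024: next multiple of a power of 2 */
	unsigned long b = roundup(1000, 300);		/* 1200: next arbitrary multiple */
	unsigned int c = DIV_ROUND_UP(10, 3);		/* 4 */
	int d = DIV_ROUND_CLOSEST(10, 3);		/* 3 */
	u64 e = int_pow(2, 20);				/* 1048576 */
	unsigned long f = int_sqrt(1000000);		/* 1000 */
	u32 g = reciprocal_scale(0x80000000u, 10);	/* 5: scales the value into [0, 10) */

	/* Silence unused-variable warnings in this standalone sketch. */
	(void)a; (void)b; (void)c; (void)d; (void)e; (void)f; (void)g;
}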