path: root/include/linux/atomic/atomic-instrumented.h
author	Mark Rutland <mark.rutland@arm.com>	2023-06-05 08:01:20 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2023-06-05 09:57:22 +0200
commit	1d78814d41701c216e28fcf2656526146dec4a1a (patch)
tree	5849192c902cf353cded8c01776f123d66645d30	/include/linux/atomic/atomic-instrumented.h
parent	630399469ffcb937936644fbaa5daf61e700a329 (diff)
locking/atomic: scripts: simplify raw_atomic*() definitions
Currently each ordering variant has several potential definitions, with a
mixture of preprocessor and C definitions, including several copies of its C
prototype, e.g.

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Make this a bit simpler by defining the C prototype once, and writing the
various potential definitions as plain C code guarded by ifdeffery. For
example, the above becomes:

| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| #if defined(arch_atomic_fetch_andnot_acquire)
| 	return arch_atomic_fetch_andnot_acquire(i, v);
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| #elif defined(arch_atomic_fetch_andnot)
| 	return arch_atomic_fetch_andnot(i, v);
| #else
| 	return raw_atomic_fetch_and_acquire(~i, v);
| #endif
| }

Which is far easier to read.

As we now always have a single copy of the C prototype wrapping all the
potential definitions, we now have an obvious single location for kerneldoc
comments.

At the same time, the fallbacks for raw_atomic*_xchg() are made to use 'new'
rather than 'i' as the name of the new value. This is what the existing
fallback template used, and is more consistent with the
raw_atomic_{try_,}cmpxchg() fallbacks.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-24-mark.rutland@arm.com
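
To make the xchg rename above concrete, here is an illustrative sketch (not
part of this patch, which only regenerates the instrumented wrappers shown in
the diff below) of how an acquire-ordered raw_atomic_xchg() fallback could
look under the single-prototype scheme, taking 'new' as the value argument.
The arch_atomic_xchg*() branch structure and the final raw_xchg_acquire()
fallback are assumed by analogy with the fetch_andnot example quoted in the
commit message:

| static __always_inline int
| raw_atomic_xchg_acquire(atomic_t *v, int new)
| {
| #if defined(arch_atomic_xchg_acquire)
| 	/* The architecture provides the acquire variant directly. */
| 	return arch_atomic_xchg_acquire(v, new);
| #elif defined(arch_atomic_xchg_relaxed)
| 	/* Build acquire ordering from the relaxed variant plus a fence. */
| 	int ret = arch_atomic_xchg_relaxed(v, new);
| 	__atomic_acquire_fence();
| 	return ret;
| #elif defined(arch_atomic_xchg)
| 	/* A fully ordered op is at least as strong as acquire. */
| 	return arch_atomic_xchg(v, new);
| #else
| 	/* Assumed last resort: plain xchg() on the underlying counter. */
| 	return raw_xchg_acquire(&v->counter, new);
| #endif
| }

The instrumented atomic_xchg_acquire() wrapper in the diff below then simply
adds the instrumentation call before delegating to the raw variant.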
Diffstat (limited to 'include/linux/atomic/atomic-instrumented.h')
-rw-r--r--	include/linux/atomic/atomic-instrumented.h	50
1 file changed, 25 insertions, 25 deletions
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 90ee2f55af77..5491c89dc03a 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -462,33 +462,33 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v)
}
static __always_inline int
-atomic_xchg(atomic_t *v, int i)
+atomic_xchg(atomic_t *v, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_xchg(v, i);
+ return raw_atomic_xchg(v, new);
}
static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
+atomic_xchg_acquire(atomic_t *v, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_xchg_acquire(v, i);
+ return raw_atomic_xchg_acquire(v, new);
}
static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
+atomic_xchg_release(atomic_t *v, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_xchg_release(v, i);
+ return raw_atomic_xchg_release(v, new);
}
static __always_inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
+atomic_xchg_relaxed(atomic_t *v, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_xchg_relaxed(v, i);
+ return raw_atomic_xchg_relaxed(v, new);
}
static __always_inline int
@@ -1103,33 +1103,33 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
}
static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
+atomic64_xchg(atomic64_t *v, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic64_xchg(v, i);
+ return raw_atomic64_xchg(v, new);
}
static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
+atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic64_xchg_acquire(v, i);
+ return raw_atomic64_xchg_acquire(v, new);
}
static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
+atomic64_xchg_release(atomic64_t *v, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic64_xchg_release(v, i);
+ return raw_atomic64_xchg_release(v, new);
}
static __always_inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic64_xchg_relaxed(v, i);
+ return raw_atomic64_xchg_relaxed(v, new);
}
static __always_inline s64
@@ -1744,33 +1744,33 @@ atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
}
static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
+atomic_long_xchg(atomic_long_t *v, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_long_xchg(v, i);
+ return raw_atomic_long_xchg(v, new);
}
static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
+atomic_long_xchg_acquire(atomic_long_t *v, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_long_xchg_acquire(v, i);
+ return raw_atomic_long_xchg_acquire(v, new);
}
static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
+atomic_long_xchg_release(atomic_long_t *v, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_long_xchg_release(v, i);
+ return raw_atomic_long_xchg_release(v, new);
}
static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+atomic_long_xchg_relaxed(atomic_long_t *v, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return raw_atomic_long_xchg_relaxed(v, i);
+ return raw_atomic_long_xchg_relaxed(v, new);
}
static __always_inline long
@@ -2231,4 +2231,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// f6502977180430e61c1a7c4e5e665f04f501fb8d
+// a4c3d2b229f907654cc53cb5d40e80f7fed1ec9c