| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 |
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 |
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /kernel/kcsan/core.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'kernel/kcsan/core.c')
| -rw-r--r-- | kernel/kcsan/core.c | 17 |
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 54d077e1a2dc..5a60cc52adc0 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -337,11 +337,20 @@ static void delay_access(int type)
  */
 static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
 {
+	/*
+	 * In the below we don't necessarily need the read of the location to
+	 * be atomic, and we don't use READ_ONCE(), since all we need for race
+	 * detection is to observe 2 different values.
+	 *
+	 * Furthermore, on certain architectures (such as arm64), READ_ONCE()
+	 * may turn into more complex instructions than a plain load that cannot
+	 * do unaligned accesses.
+	 */
 	switch (size) {
-	case 1: return READ_ONCE(*(const u8 *)ptr);
-	case 2: return READ_ONCE(*(const u16 *)ptr);
-	case 4: return READ_ONCE(*(const u32 *)ptr);
-	case 8: return READ_ONCE(*(const u64 *)ptr);
+	case 1: return *(const volatile u8 *)ptr;
+	case 2: return *(const volatile u16 *)ptr;
+	case 4: return *(const volatile u32 *)ptr;
+	case 8: return *(const volatile u64 *)ptr;
 	default: return 0; /* Ignore; we do not diff the values. */
 	}
 }
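For context, the comment in the hunk above boils down to KCSAN's double-read check: two plain loads of the same location that return different values are sufficient evidence of a racing writer, so the loads must be volatile (so the compiler cannot merge or cache them) but need not be atomic. Below is a minimal, self-contained userspace sketch of that idea, not KCSAN's actual watchpoint machinery; names such as read_mem(), delay_for_racing_writer(), and value_changed() are hypothetical stand-ins for illustration only.

```c
/*
 * Sketch of the double-read race check the patch comment describes:
 * read the location, wait for a potential racing writer, read again,
 * and report if the two plain (non-atomic) volatile reads observed
 * different values. All names here are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors read_instrumented_memory(): plain volatile loads, no READ_ONCE(). */
static uint64_t read_mem(const volatile void *ptr, size_t size)
{
	switch (size) {
	case 1: return *(const volatile uint8_t *)ptr;
	case 2: return *(const volatile uint16_t *)ptr;
	case 4: return *(const volatile uint32_t *)ptr;
	case 8: return *(const volatile uint64_t *)ptr;
	default: return 0; /* Ignore; we do not diff the values. */
	}
}

/* Hypothetical stand-in for KCSAN's delay while a watchpoint is armed. */
static void delay_for_racing_writer(void)
{
	for (volatile int i = 0; i < 100000; i++)
		;
}

/* Returns nonzero if a racing write changed *ptr between the two reads. */
static int value_changed(const volatile void *ptr, size_t size)
{
	uint64_t before = read_mem(ptr, size);

	delay_for_racing_writer();
	return read_mem(ptr, size) != before;
}

int main(void)
{
	static uint32_t shared = 42; /* would be written by another thread */

	if (value_changed(&shared, sizeof(shared)))
		printf("data race suspected on %p\n", (void *)&shared);
	return 0;
}
```

The design choice the patch makes is visible in read_mem(): the volatile casts ensure each load actually happens while avoiding READ_ONCE(), which, per the patch comment, may expand on some architectures (such as arm64) into more complex instructions than a plain load that cannot handle unaligned accesses.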