Diffstat (limited to 'lib')
118 files changed, 6278 insertions, 3854 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 2d90935d5a21..d33a268bc256 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -70,9 +70,6 @@ source "lib/math/Kconfig" config NO_GENERIC_PCI_IOPORT_MAP bool -config GENERIC_PCI_IOMAP - bool - config GENERIC_IOMAP bool select GENERIC_PCI_IOMAP @@ -631,7 +628,7 @@ config SIGNATURE Implementation is done using GnuPG MPI library config DIMLIB - bool + tristate help Dynamic Interrupt Moderation library. Implements an algorithm for dynamically changing CQ moderation values @@ -713,10 +710,20 @@ config ARCH_STACKWALK config STACKDEPOT bool select STACKTRACE + help + Stack depot: stack trace storage that avoids duplication config STACKDEPOT_ALWAYS_INIT bool select STACKDEPOT + help + Always initialize stack depot during early boot + +config STACKDEPOT_MAX_FRAMES + int "Maximum number of frames in trace saved in stack depot" + range 1 256 + default 64 + depends on STACKDEPOT config REF_TRACKER bool @@ -772,3 +779,6 @@ config ASN1_ENCODER config POLYNOMIAL tristate + +config FIRMWARE_TABLE + bool diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index cc7d53d9dc01..8bba448c819b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -231,9 +231,10 @@ config DEBUG_INFO in the "Debug information" choice below, indicating that debug information will be generated for build targets. -# Clang is known to generate .{s,u}leb128 with symbol deltas with DWARF5, which -# some targets may not support: https://sourceware.org/bugzilla/show_bug.cgi?id=27215 -config AS_HAS_NON_CONST_LEB128 +# Clang generates .uleb128 with label differences for DWARF v5, a feature that +# older binutils ports do not support when utilizing RISC-V style linker +# relaxation: https://sourceware.org/bugzilla/show_bug.cgi?id=27215 +config AS_HAS_NON_CONST_ULEB128 def_bool $(as-instr,.uleb128 .Lexpr_end4 - .Lexpr_start3\n.Lexpr_start3:\n.Lexpr_end4:) choice @@ -258,7 +259,7 @@ config DEBUG_INFO_NONE config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT bool "Rely on the toolchain's implicit default DWARF version" select DEBUG_INFO - depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128) + depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128) help The implicit default version of DWARF debug info produced by a toolchain changes over time. @@ -282,7 +283,8 @@ config DEBUG_INFO_DWARF4 config DEBUG_INFO_DWARF5 bool "Generate DWARF Version 5 debuginfo" select DEBUG_INFO - depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128) + depends on !ARCH_HAS_BROKEN_DWARF5 + depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128) help Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc 5.0+ accepts the -gdwarf-5 flag but only had partial support for some @@ -373,11 +375,13 @@ config DEBUG_INFO_SPLIT Incompatible with older versions of ccache. config DEBUG_INFO_BTF - bool "Generate BTF typeinfo" + bool "Generate BTF type information" depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST depends on BPF_SYSCALL depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121 + # pahole uses elfutils, which does not have support for Hexagon relocations + depends on !HEXAGON help Generate deduplicated BTF type information from DWARF debug info. 
Turning this on expects presence of pahole tool, which will convert @@ -404,7 +408,8 @@ config PAHOLE_HAS_LANG_EXCLUDE using DEBUG_INFO_BTF_MODULES. config DEBUG_INFO_BTF_MODULES - def_bool y + bool "Generate BTF type information for kernel modules" + default y depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF help Generate compact split BTF type information for kernel modules. @@ -763,6 +768,8 @@ config DEBUG_STACK_USAGE help Enables the display of the minimum amount of free stack which each task has ever had available in the sysrq-T and sysrq-P debug output. + Also emits a message to dmesg when a process exits if that process + used more stack space than previously exiting processes. This option will slow down process creation somewhat. @@ -1023,6 +1030,20 @@ config SOFTLOCKUP_DETECTOR chance to run. The current stack trace is displayed upon detection and the system will stay locked up. +config SOFTLOCKUP_DETECTOR_INTR_STORM + bool "Detect Interrupt Storm in Soft Lockups" + depends on SOFTLOCKUP_DETECTOR && IRQ_TIME_ACCOUNTING + select GENERIC_IRQ_STAT_SNAPSHOT + default y if NR_CPUS <= 128 + help + Say Y here to enable the kernel to detect interrupt storm + during "soft lockups". + + "soft lockups" can be caused by a variety of reasons. If one is + caused by an interrupt storm, then the storming interrupts will not + be on the callstack. To detect this case, it is necessary to report + the CPU stats and the interrupt counts during the "soft lockups". + config BOOTPARAM_SOFTLOCKUP_PANIC bool "Panic (Reboot) On Soft Lockups" depends on SOFTLOCKUP_DETECTOR @@ -1244,7 +1265,7 @@ config SCHED_INFO config SCHEDSTATS bool "Collect scheduler statistics" - depends on DEBUG_KERNEL && PROC_FS + depends on PROC_FS select SCHED_INFO help If you say Y here, additional code will be inserted into the @@ -1297,7 +1318,7 @@ config PROVE_LOCKING select DEBUG_SPINLOCK select DEBUG_MUTEXES if !PREEMPT_RT select DEBUG_RT_MUTEXES if RT_MUTEXES - select DEBUG_RWSEMS + select DEBUG_RWSEMS if !PREEMPT_RT select DEBUG_WW_MUTEX_SLOWPATH select DEBUG_LOCK_ALLOC select PREEMPT_COUNT if !ARCH_NO_PREEMPT @@ -1420,7 +1441,7 @@ config DEBUG_WW_MUTEX_SLOWPATH config DEBUG_RWSEMS bool "RW Semaphore debugging: basic checks" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !PREEMPT_RT help This debugging feature allows mismatched rw semaphore locks and unlocks to be detected and reported. @@ -1739,21 +1760,6 @@ config DEBUG_MAPLE_TREE endmenu -config DEBUG_CREDENTIALS - bool "Debug credential management" - depends on DEBUG_KERNEL - help - Enable this to turn on some debug checking for credential - management. The additional code keeps track of the number of - pointers from task_structs to any given cred struct, and checks to - see that this number never exceeds the usage count of the cred - struct. - - Furthermore, if SELinux is enabled, this also checks that the - security pointer in the cred struct is never seen to be invalid. - - If unsure, say N. - source "kernel/rcu/Kconfig.debug" config DEBUG_WQ_FORCE_RR_CPU @@ -1985,7 +1991,6 @@ config FAULT_INJECTION config FAILSLAB bool "Fault-injection capability for kmalloc" depends on FAULT_INJECTION - depends on SLAB || SLUB help Provide fault-injection capability for kmalloc. 
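To make the dependency chain of the new SOFTLOCKUP_DETECTOR_INTR_STORM entry above easier to follow, here is a minimal .config sketch that would enable it; the option names are taken directly from the depends on/select lines in that hunk, and the fragment assumes an architecture that already provides the soft-lockup watchdog and IRQ time accounting:

    # Minimal sketch: enable interrupt-storm reporting in the soft-lockup detector.
    # GENERIC_IRQ_STAT_SNAPSHOT is selected automatically and is not set by hand.
    CONFIG_SOFTLOCKUP_DETECTOR=y
    CONFIG_IRQ_TIME_ACCOUNTING=y
    CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM=y

Per the "default y if NR_CPUS <= 128" line in the hunk, the last option defaults to enabled on such systems once the first two are set.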
@@ -2095,7 +2100,7 @@ config KCOV depends on ARCH_HAS_KCOV depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \ - GCC_VERSION >= 120000 || CLANG_VERSION >= 130000 + GCC_VERSION >= 120000 || CC_IS_CLANG select DEBUG_FS select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC select OBJTOOL if HAVE_NOINSTR_HACK @@ -2103,10 +2108,6 @@ config KCOV KCOV exposes kernel code coverage information in a form suitable for coverage-guided fuzzing (randomized testing). - If RANDOMIZE_BASE is enabled, PC values will not be stable across - different machines and across reboots. If you need stable PC values, - disable RANDOMIZE_BASE. - For more details, see Documentation/dev-tools/kcov.rst. config KCOV_ENABLE_COMPARISONS @@ -2141,7 +2142,7 @@ config KCOV_IRQ_AREA_SIZE menuconfig RUNTIME_TESTING_MENU bool "Runtime Testing" - def_bool y + default y if RUNTIME_TESTING_MENU @@ -2156,7 +2157,7 @@ config TEST_DHRY To run the benchmark, it needs to be enabled explicitly, either from the kernel command line (when built-in), or from userspace (when - built-in or modular. + built-in or modular). Run once during kernel boot: @@ -2249,6 +2250,7 @@ config TEST_DIV64 config TEST_IOV_ITER tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS depends on KUNIT + depends on MMU default KUNIT_ALL_TESTS help Enable this to turn on testing of the operation of the I/O iterator @@ -2366,11 +2368,15 @@ config ASYNC_RAID6_TEST config TEST_HEXDUMP tristate "Test functions located in the hexdump module at runtime" -config STRING_SELFTEST - tristate "Test string functions at runtime" +config STRING_KUNIT_TEST + tristate "KUnit test string functions at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS -config TEST_STRING_HELPERS - tristate "Test functions located in the string_helpers module at runtime" +config STRING_HELPERS_KUNIT_TEST + tristate "KUnit test string helpers at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS config TEST_KSTRTOX tristate "Test kstrto*() family of functions at runtime" @@ -2712,18 +2718,6 @@ config MEMCPY_KUNIT_TEST If unsure, say N. -config MEMCPY_SLOW_KUNIT_TEST - bool "Include exhaustive memcpy tests" - depends on MEMCPY_KUNIT_TEST - default y - help - Some memcpy tests are quite exhaustive in checking for overlaps - and bit ranges. These can be very slow, so they are split out - as a separate config, in case they need to be disabled. - - Note this config option will be replaced by the use of KUnit test - attributes. - config IS_SIGNED_TYPE_KUNIT_TEST tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS depends on KUNIT @@ -2762,7 +2756,7 @@ config STACKINIT_KUNIT_TEST config FORTIFY_KUNIT_TEST tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT && FORTIFY_SOURCE + depends on KUNIT default KUNIT_ALL_TESTS help Builds unit tests for checking internals of FORTIFY_SOURCE as used @@ -2779,16 +2773,6 @@ config HW_BREAKPOINT_KUNIT_TEST If unsure, say N. -config STRCAT_KUNIT_TEST - tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT - default KUNIT_ALL_TESTS - -config STRSCPY_KUNIT_TEST - tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT - default KUNIT_ALL_TESTS - config SIPHASH_KUNIT_TEST tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS depends on KUNIT @@ -2871,28 +2855,6 @@ config TEST_MEMCAT_P If unsure, say N. 
-config TEST_LIVEPATCH - tristate "Test livepatching" - default n - depends on DYNAMIC_DEBUG - depends on LIVEPATCH - depends on m - help - Test kernel livepatching features for correctness. The tests will - load test modules that will be livepatched in various scenarios. - - To run all the livepatching tests: - - make -C tools/testing/selftests TARGETS=livepatch run_tests - - Alternatively, individual tests may be invoked: - - tools/testing/selftests/livepatch/test-callbacks.sh - tools/testing/selftests/livepatch/test-livepatch.sh - tools/testing/selftests/livepatch/test-shadow-vars.sh - - If unsure, say N. - config TEST_OBJAGG tristate "Perform selftest on object aggreration manager" default n diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index fdca89c05745..98016e137b7f 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -37,7 +37,7 @@ menuconfig KASAN (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \ CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \ HAVE_ARCH_KASAN_HW_TAGS - depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB) + depends on SYSFS && !SLUB_TINY select STACKDEPOT_ALWAYS_INIT help Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety @@ -78,7 +78,7 @@ config KASAN_GENERIC bool "Generic KASAN" depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS - select SLUB_DEBUG if SLUB + select SLUB_DEBUG select CONSTRUCTORS help Enables Generic KASAN. @@ -89,13 +89,11 @@ config KASAN_GENERIC overhead of ~50% for dynamic allocations. The performance slowdown is ~x3. - (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.) - config KASAN_SW_TAGS bool "Software Tag-Based KASAN" depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS - select SLUB_DEBUG if SLUB + select SLUB_DEBUG select CONSTRUCTORS help Enables Software Tag-Based KASAN. @@ -110,12 +108,9 @@ config KASAN_SW_TAGS May potentially introduce problems related to pointer casting and comparison, as it embeds a tag into the top byte of each pointer. - (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.) - config KASAN_HW_TAGS bool "Hardware Tag-Based KASAN" depends on HAVE_ARCH_KASAN_HW_TAGS - depends on SLUB help Enables Hardware Tag-Based KASAN. @@ -134,7 +129,7 @@ endchoice choice prompt "Instrumentation type" depends on KASAN_GENERIC || KASAN_SW_TAGS - default KASAN_OUTLINE + default KASAN_INLINE if !ARCH_DISABLE_KASAN_INLINE config KASAN_OUTLINE bool "Outline instrumentation" @@ -163,7 +158,7 @@ config KASAN_STACK out-of-bounds bugs in stack variables. With Clang, stack instrumentation has a problem that causes excessive - stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus, + stack usage, see https://llvm.org/pr38809. Thus, with Clang, this option is deemed unsafe. This option is always disabled when compile-testing with Clang to @@ -207,4 +202,25 @@ config KASAN_MODULE_TEST A part of the KASAN test suite that is not integrated with KUnit. Incompatible with Hardware Tag-Based KASAN. +config KASAN_EXTRA_INFO + bool "Record and report more information" + depends on KASAN + help + Record and report more information to help us find the cause of the + bug and to help us correlate the error with other system events. + + Currently, the CPU number and timestamp are additionally + recorded for each heap block at allocation and free time, and + 8 bytes will be added to each metadata structure that records + allocation or free information. 
+ + In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add + 16 bytes of additional memory consumption, and each kmalloc-32 + object will add 8 bytes of additional memory consumption, not + affecting other larger objects. + + In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size + boot parameter, it will add 8 * stack_ring_size bytes of additional + memory consumption. + endif # KASAN diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence index 459dda9ef619..6fbbebec683a 100644 --- a/lib/Kconfig.kfence +++ b/lib/Kconfig.kfence @@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE menuconfig KFENCE bool "KFENCE: low-overhead sampling-based memory safety error detector" - depends on HAVE_ARCH_KFENCE && (SLAB || SLUB) + depends on HAVE_ARCH_KFENCE select STACKTRACE select IRQ_WORK help diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 3b9a44008433..b5c0e6576749 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -43,7 +43,7 @@ config KGDB_SERIAL_CONSOLE tristate "KGDB: use kgdb over the serial console" select CONSOLE_POLL select MAGIC_SYSRQ - depends on TTY && HW_CONSOLE + depends on TTY && VT default y help Share a serial console with kgdb. Sysrq-g must be used diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan index ef2c8f256c57..0541d7b079cc 100644 --- a/lib/Kconfig.kmsan +++ b/lib/Kconfig.kmsan @@ -11,7 +11,7 @@ config HAVE_KMSAN_COMPILER config KMSAN bool "KMSAN: detector of uninitialized values use" depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER - depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN + depends on DEBUG_KERNEL && !KASAN && !KCSAN depends on !PREEMPT_RT select STACKDEPOT select STACKDEPOT_ALWAYS_INIT diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 59e21bfec188..e81e1ac4a919 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -config ARCH_HAS_UBSAN_SANITIZE_ALL +config ARCH_HAS_UBSAN bool menuconfig UBSAN @@ -87,7 +87,6 @@ config UBSAN_LOCAL_BOUNDS config UBSAN_SHIFT bool "Perform checking for bit-shift overflows" - default UBSAN depends on $(cc-option,-fsanitize=shift) help This option enables -fsanitize=shift which checks for bit-shift @@ -116,6 +115,22 @@ config UBSAN_UNREACHABLE This option enables -fsanitize=unreachable which checks for control flow reaching an expected-to-be-unreachable position. +config UBSAN_SIGNED_WRAP + bool "Perform checking for signed arithmetic wrap-around" + default UBSAN + depends on !COMPILE_TEST + # The no_sanitize attribute was introduced in GCC with version 8. + depends on !CC_IS_GCC || GCC_VERSION >= 80000 + depends on $(cc-option,-fsanitize=signed-integer-overflow) + help + This option enables -fsanitize=signed-integer-overflow which checks + for wrap-around of any arithmetic operations with signed integers. + This currently performs nearly no instrumentation due to the + kernel's use of -fno-strict-overflow which converts all would-be + arithmetic undefined behavior into wrap-around arithmetic. Future + sanitizer versions will allow for wrap-around checking (rather than + exclusively undefined behavior). + config UBSAN_BOOL bool "Perform checking for non-boolean values used as boolean" default UBSAN @@ -142,17 +157,6 @@ config UBSAN_ALIGNMENT Enabling this option on architectures that support unaligned accesses may produce a lot of false positives. -config UBSAN_SANITIZE_ALL - bool "Enable instrumentation for the entire kernel" - depends on ARCH_HAS_UBSAN_SANITIZE_ALL - default y - help - This option activates instrumentation for the entire kernel. 
- If you don't enable this option, you have to explicitly specify - UBSAN_SANITIZE := y for the files/directories you want to check for UB. - Enabling this option will get kernel image size increased - significantly. - config TEST_UBSAN tristate "Module for testing for undefined behavior detection" depends on m diff --git a/lib/Makefile b/lib/Makefile index 13455f47f9df..ed8dbf4436dd 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -49,9 +49,9 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ percpu-refcount.o rhashtable.o base64.o \ once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ generic-radix-tree.o bitmap-str.o -obj-$(CONFIG_STRING_SELFTEST) += test_string.o +obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o obj-y += string_helpers.o -obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o +obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o obj-y += hexdump.o obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o obj-y += kstrtox.o @@ -69,6 +69,7 @@ obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o obj-$(CONFIG_TEST_IDA) += test_ida.o obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla) +CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable) UBSAN_SANITIZE_test_ubsan.o := y obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o @@ -134,8 +135,6 @@ endif obj-$(CONFIG_TEST_FPU) += test_fpu.o CFLAGS_test_fpu.o += $(FPU_CFLAGS) -obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ - # Some KUnit files (hooks.o) need to be built-in even when KUnit is a module, # so we can't just use obj-$(CONFIG_KUNIT). ifdef CONFIG_KUNIT @@ -153,7 +152,6 @@ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) obj-y += math/ crypto/ obj-$(CONFIG_GENERIC_IOMAP) += iomap.o -obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o @@ -238,6 +236,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o +obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o #ensure exported functions have prototypes @@ -401,14 +400,16 @@ obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable) obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced) +CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread) +CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation) CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o -obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o -obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o +obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o + # FORTIFY_SOURCE compile-time behavior tests TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c) TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS)) diff --git a/lib/assoc_array.c b/lib/assoc_array.c index ca0b4f360c1a..388e656ac974 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -938,7 +938,7 @@ static bool assoc_array_insert_mid_shortcut(struct 
assoc_array_edit *edit, edit->leaf_p = &new_n0->slots[0]; pr_devel("<--%s() = ok [split shortcut]\n", __func__); - return edit; + return true; } /** diff --git a/lib/bitmap.c b/lib/bitmap.c index 09522af227f1..b97692854966 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -348,6 +348,13 @@ unsigned int __bitmap_weight_and(const unsigned long *bitmap1, } EXPORT_SYMBOL(__bitmap_weight_and); +unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) +{ + return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits); +} +EXPORT_SYMBOL(__bitmap_weight_andnot); + void __bitmap_set(unsigned long *map, unsigned int start, int len) { unsigned long *p = map + BIT_WORD(start); diff --git a/lib/bootconfig.c b/lib/bootconfig.c index c59d26068a64..97f8911ea339 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_mem(size_t size) return memblock_alloc(size, SMP_CACHE_BYTES); } -static inline void __init xbc_free_mem(void *addr, size_t size) +static inline void __init xbc_free_mem(void *addr, size_t size, bool early) { - memblock_free(addr, size); + if (early) + memblock_free(addr, size); + else if (addr) + memblock_free_late(__pa(addr), size); } #else /* !__KERNEL__ */ @@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t size) return malloc(size); } -static inline void xbc_free_mem(void *addr, size_t size) +static inline void xbc_free_mem(void *addr, size_t size, bool early) { free(addr); } @@ -898,19 +901,20 @@ static int __init xbc_parse_tree(void) } /** - * xbc_exit() - Clean up all parsed bootconfig + * _xbc_exit() - Clean up all parsed bootconfig + * @early: Set true if this is called before budy system is initialized. * * This clears all data structures of parsed bootconfig on memory. * If you need to reuse xbc_init() with new boot config, you can * use this. 
*/ -void __init xbc_exit(void) +void __init _xbc_exit(bool early) { - xbc_free_mem(xbc_data, xbc_data_size); + xbc_free_mem(xbc_data, xbc_data_size, early); xbc_data = NULL; xbc_data_size = 0; xbc_node_num = 0; - xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX); + xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early); xbc_nodes = NULL; brace_index = 0; } @@ -963,7 +967,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos) if (!xbc_nodes) { if (emsg) *emsg = "Failed to allocate bootconfig nodes"; - xbc_exit(); + _xbc_exit(true); return -ENOMEM; } memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX); @@ -977,7 +981,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos) *epos = xbc_err_pos; if (emsg) *emsg = xbc_err_msg; - xbc_exit(); + _xbc_exit(true); } else ret = xbc_node_num; diff --git a/lib/buildid.c b/lib/buildid.c index e3a7acdeef0e..898301b49eb6 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -140,7 +140,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, return -EFAULT; /* page not mapped */ ret = -EINVAL; - page_addr = kmap_atomic(page); + page_addr = kmap_local_page(page); ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ @@ -156,7 +156,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ret = get_build_id_64(page_addr, build_id, size); out: - kunmap_atomic(page_addr); + kunmap_local(page_addr); put_page(page); return ret; } @@ -174,7 +174,7 @@ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size) return parse_build_id_buf(build_id, NULL, buf, buf_size); } -#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO) unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init; /** diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c index 0eed92b77ba3..404dba36bae3 100644 --- a/lib/checksum_kunit.c +++ b/lib/checksum_kunit.c @@ -1,15 +1,21 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Test cases csum_partial and csum_fold + * Test cases csum_partial, csum_fold, ip_fast_csum, csum_ipv6_magic */ #include <kunit/test.h> #include <asm/checksum.h> +#include <net/ip6_checksum.h> #define MAX_LEN 512 #define MAX_ALIGN 64 #define TEST_BUFLEN (MAX_LEN + MAX_ALIGN) +#define IPv4_MIN_WORDS 5 +#define IPv4_MAX_WORDS 15 +#define NUM_IPv6_TESTS 200 +#define NUM_IP_FAST_CSUM_TESTS 181 + /* Values for a little endian CPU. Byte swap each half on big endian CPU. 
*/ static const u32 random_init_sum = 0x2847aab; static const u8 random_buf[] = { @@ -209,6 +215,237 @@ static const u32 init_sums_no_overflow[] = { 0xffff0000, 0xfffffffb, }; +static const u16 expected_csum_ipv6_magic[] = { + 0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f, 0x1034, 0x8422, 0x6fc0, + 0xd2f6, 0xbeb5, 0x9d3, 0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e, + 0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d, + 0xdf81, 0x8fd5, 0x3b5d, 0x8324, 0xf471, 0x83be, 0x1daf, 0x8c46, 0xe682, + 0xd1fb, 0x6b2e, 0xe687, 0x2a33, 0x4833, 0x2d67, 0x660f, 0x2e79, 0xd65e, + 0x6b62, 0x6672, 0x5dbd, 0x8680, 0xbaa5, 0x2229, 0x2125, 0x2d01, 0x1cc0, + 0x6d36, 0x33c0, 0xee36, 0xd832, 0x9820, 0x8a31, 0x53c5, 0x2e2, 0xdb0e, + 0x49ed, 0x17a7, 0x77a0, 0xd72e, 0x3d72, 0x7dc8, 0x5b17, 0xf55d, 0xa4d9, + 0x1446, 0x5d56, 0x6b2e, 0x69a5, 0xadb6, 0xff2a, 0x92e, 0xe044, 0x3402, + 0xbb60, 0xec7f, 0xe7e6, 0x1986, 0x32f4, 0x8f8, 0x5e00, 0x47c6, 0x3059, + 0x3969, 0xe957, 0x4388, 0x2854, 0x3334, 0xea71, 0xa6de, 0x33f9, 0x83fc, + 0x37b4, 0x5531, 0x3404, 0x1010, 0xed30, 0x610a, 0xc95, 0x9aed, 0x6ff, + 0x5136, 0x2741, 0x660e, 0x8b80, 0xf71, 0xa263, 0x88af, 0x7a73, 0x3c37, + 0x1908, 0x6db5, 0x2e92, 0x1cd2, 0x70c8, 0xee16, 0xe80, 0xcd55, 0x6e6, + 0x6434, 0x127, 0x655d, 0x2ea0, 0xb4f4, 0xdc20, 0x5671, 0xe462, 0xe52b, + 0xdb44, 0x3589, 0xc48f, 0xe60b, 0xd2d2, 0x66ad, 0x498, 0x436, 0xb917, + 0xf0ca, 0x1a6e, 0x1cb7, 0xbf61, 0x2870, 0xc7e8, 0x5b30, 0xe4a5, 0x168, + 0xadfc, 0xd035, 0xe690, 0xe283, 0xfb27, 0xe4ad, 0xb1a5, 0xf2d5, 0xc4b6, + 0x8a30, 0xd7d5, 0x7df9, 0x91d5, 0x63ed, 0x2d21, 0x312b, 0xab19, 0xa632, + 0x8d2e, 0xef06, 0x57b9, 0xc373, 0xbd1f, 0xa41f, 0x8444, 0x9975, 0x90cb, + 0xc49c, 0xe965, 0x4eff, 0x5a, 0xef6d, 0xe81a, 0xe260, 0x853a, 0xff7a, + 0x99aa, 0xb06b, 0xee19, 0xcc2c, 0xf34c, 0x7c49, 0xdac3, 0xa71e, 0xc988, + 0x3845, 0x1014 +}; + +static const u16 expected_fast_csum[] = { + 0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e, 0xe902, 0xa5e9, 0x87a5, 0x7187, + 0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a, + 0xedbe, 0xabee, 0x6aac, 0xe6b, 0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a, + 0x3a70, 0x9f3a, 0xe89e, 0x75e8, 0x7976, 0xfa79, 0x2cfa, 0x3c2c, 0x463c, + 0x7146, 0x7a71, 0x547a, 0xfd53, 0x99fc, 0xb699, 0x92b6, 0xdb91, 0xe8da, + 0x5fe9, 0x1e60, 0xae1d, 0x39ae, 0xf439, 0xa1f4, 0xdda1, 0xede, 0x790f, + 0x579, 0x1206, 0x9012, 0x2490, 0xd224, 0x5cd2, 0xa65d, 0xca7, 0x220d, + 0xf922, 0xbf9, 0x920b, 0x1b92, 0x361c, 0x2e36, 0x4d2e, 0x24d, 0x2, + 0xcfff, 0x90cf, 0xa591, 0x93a5, 0x7993, 0x9579, 0xc894, 0x50c8, 0x5f50, + 0xd55e, 0xcad5, 0xf3c9, 0x8f4, 0x4409, 0x5043, 0x5b50, 0x55b, 0x2205, + 0x1e22, 0x801e, 0x3780, 0xe137, 0x7ee0, 0xf67d, 0x3cf6, 0xa53c, 0x2ea5, + 0x472e, 0x5147, 0xcf51, 0x1bcf, 0x951c, 0x1e95, 0xc71e, 0xe4c7, 0xc3e4, + 0x3dc3, 0xee3d, 0xa4ed, 0xf9a4, 0xcbf8, 0x75cb, 0xb375, 0x50b4, 0x3551, + 0xf835, 0x19f8, 0x8c1a, 0x538c, 0xad52, 0xa3ac, 0xb0a3, 0x5cb0, 0x6c5c, + 0x5b6c, 0xc05a, 0x92c0, 0x4792, 0xbe47, 0x53be, 0x1554, 0x5715, 0x4b57, + 0xe54a, 0x20e5, 0x21, 0xd500, 0xa1d4, 0xa8a1, 0x57a9, 0xca57, 0x5ca, + 0x1c06, 0x4f1c, 0xe24e, 0xd9e2, 0xf0d9, 0x4af1, 0x474b, 0x8146, 0xe81, + 0xfd0e, 0x84fd, 0x7c85, 0xba7c, 0x17ba, 0x4a17, 0x964a, 0xf595, 0xff5, + 0x5310, 0x3253, 0x6432, 0x4263, 0x2242, 0xe121, 0x32e1, 0xf632, 0xc5f5, + 0x21c6, 0x7d22, 0x8e7c, 0x418e, 0x5641, 0x3156, 0x7c31, 0x737c, 0x373, + 0x2503, 0xc22a, 0x3c2, 0x4a04, 0x8549, 0x5285, 0xa352, 0xe8a3, 0x6fe8, + 0x1a6f, 0x211a, 0xe021, 0x38e0, 0x7638, 0xf575, 0x9df5, 0x169e, 0xf116, + 0x23f1, 0xcd23, 0xece, 0x660f, 0x4866, 
0x6a48, 0x716a, 0xee71, 0xa2ee, + 0xb8a2, 0x61b9, 0xa361, 0xf7a2, 0x26f7, 0x1127, 0x6611, 0xe065, 0x36e0, + 0x1837, 0x3018, 0x1c30, 0x721b, 0x3e71, 0xe43d, 0x99e4, 0x9e9a, 0xb79d, + 0xa9b7, 0xcaa, 0xeb0c, 0x4eb, 0x1305, 0x8813, 0xb687, 0xa9b6, 0xfba9, + 0xd7fb, 0xccd8, 0x2ecd, 0x652f, 0xae65, 0x3fae, 0x3a40, 0x563a, 0x7556, + 0x2776, 0x1228, 0xef12, 0xf9ee, 0xcef9, 0x56cf, 0xa956, 0x24a9, 0xba24, + 0x5fba, 0x665f, 0xf465, 0x8ff4, 0x6d8f, 0x346d, 0x5f34, 0x385f, 0xd137, + 0xb8d0, 0xacb8, 0x55ac, 0x7455, 0xe874, 0x89e8, 0xd189, 0xa0d1, 0xb2a0, + 0xb8b2, 0x36b8, 0x5636, 0xd355, 0x8d3, 0x1908, 0x2118, 0xc21, 0x990c, + 0x8b99, 0x158c, 0x7815, 0x9e78, 0x6f9e, 0x4470, 0x1d44, 0x341d, 0x2634, + 0x3f26, 0x793e, 0xc79, 0xcc0b, 0x26cc, 0xd126, 0x1fd1, 0xb41f, 0xb6b4, + 0x22b7, 0xa122, 0xa1, 0x7f01, 0x837e, 0x3b83, 0xaf3b, 0x6fae, 0x916f, + 0xb490, 0xffb3, 0xceff, 0x50cf, 0x7550, 0x7275, 0x1272, 0x2613, 0xaa26, + 0xd5aa, 0x7d5, 0x9607, 0x96, 0xb100, 0xf8b0, 0x4bf8, 0xdd4c, 0xeddd, + 0x98ed, 0x2599, 0x9325, 0xeb92, 0x8feb, 0xcc8f, 0x2acd, 0x392b, 0x3b39, + 0xcb3b, 0x6acb, 0xd46a, 0xb8d4, 0x6ab8, 0x106a, 0x2f10, 0x892f, 0x789, + 0xc806, 0x45c8, 0x7445, 0x3c74, 0x3a3c, 0xcf39, 0xd7ce, 0x58d8, 0x6e58, + 0x336e, 0x1034, 0xee10, 0xe9ed, 0xc2e9, 0x3fc2, 0xd53e, 0xd2d4, 0xead2, + 0x8fea, 0x2190, 0x1162, 0xbe11, 0x8cbe, 0x6d8c, 0xfb6c, 0x6dfb, 0xd36e, + 0x3ad3, 0xf3a, 0x870e, 0xc287, 0x53c3, 0xc54, 0x5b0c, 0x7d5a, 0x797d, + 0xec79, 0x5dec, 0x4d5e, 0x184e, 0xd618, 0x60d6, 0xb360, 0x98b3, 0xf298, + 0xb1f2, 0x69b1, 0xf969, 0xef9, 0xab0e, 0x21ab, 0xe321, 0x24e3, 0x8224, + 0x5481, 0x5954, 0x7a59, 0xff7a, 0x7dff, 0x1a7d, 0xa51a, 0x46a5, 0x6b47, + 0xe6b, 0x830e, 0xa083, 0xff9f, 0xd0ff, 0xffd0, 0xe6ff, 0x7de7, 0xc67d, + 0xd0c6, 0x61d1, 0x3a62, 0xc3b, 0x150c, 0x1715, 0x4517, 0x5345, 0x3954, + 0xdd39, 0xdadd, 0x32db, 0x6a33, 0xd169, 0x86d1, 0xb687, 0x3fb6, 0x883f, + 0xa487, 0x39a4, 0x2139, 0xbe20, 0xffbe, 0xedfe, 0x8ded, 0x368e, 0xc335, + 0x51c3, 0x9851, 0xf297, 0xd6f2, 0xb9d6, 0x95ba, 0x2096, 0xea1f, 0x76e9, + 0x4e76, 0xe04d, 0xd0df, 0x80d0, 0xa280, 0xfca2, 0x75fc, 0xef75, 0x32ef, + 0x6833, 0xdf68, 0xc4df, 0x76c4, 0xb77, 0xb10a, 0xbfb1, 0x58bf, 0x5258, + 0x4d52, 0x6c4d, 0x7e6c, 0xb67e, 0xccb5, 0x8ccc, 0xbe8c, 0xc8bd, 0x9ac8, + 0xa99b, 0x52a9, 0x2f53, 0xc30, 0x3e0c, 0xb83d, 0x83b7, 0x5383, 0x7e53, + 0x4f7e, 0xe24e, 0xb3e1, 0x8db3, 0x618e, 0xc861, 0xfcc8, 0x34fc, 0x9b35, + 0xaa9b, 0xb1aa, 0x5eb1, 0x395e, 0x8639, 0xd486, 0x8bd4, 0x558b, 0x2156, + 0xf721, 0x4ef6, 0x14f, 0x7301, 0xdd72, 0x49de, 0x894a, 0x9889, 0x8898, + 0x7788, 0x7b77, 0x637b, 0xb963, 0xabb9, 0x7cab, 0xc87b, 0x21c8, 0xcb21, + 0xdfca, 0xbfdf, 0xf2bf, 0x6af2, 0x626b, 0xb261, 0x3cb2, 0xc63c, 0xc9c6, + 0xc9c9, 0xb4c9, 0xf9b4, 0x91f9, 0x4091, 0x3a40, 0xcc39, 0xd1cb, 0x7ed1, + 0x537f, 0x6753, 0xa167, 0xba49, 0x88ba, 0x7789, 0x3877, 0xf037, 0xd3ef, + 0xb5d4, 0x55b6, 0xa555, 0xeca4, 0xa1ec, 0xb6a2, 0x7b7, 0x9507, 0xfd94, + 0x82fd, 0x5c83, 0x765c, 0x9676, 0x3f97, 0xda3f, 0x6fda, 0x646f, 0x3064, + 0x5e30, 0x655e, 0x6465, 0xcb64, 0xcdca, 0x4ccd, 0x3f4c, 0x243f, 0x6f24, + 0x656f, 0x6065, 0x3560, 0x3b36, 0xac3b, 0x4aac, 0x714a, 0x7e71, 0xda7e, + 0x7fda, 0xda7f, 0x6fda, 0xff6f, 0xc6ff, 0xedc6, 0xd4ed, 0x70d5, 0xeb70, + 0xa3eb, 0x80a3, 0xca80, 0x3fcb, 0x2540, 0xf825, 0x7ef8, 0xf87e, 0x73f8, + 0xb474, 0xb4b4, 0x92b5, 0x9293, 0x93, 0x3500, 0x7134, 0x9071, 0xfa8f, + 0x51fa, 0x1452, 0xba13, 0x7ab9, 0x957a, 0x8a95, 0x6e8a, 0x6d6e, 0x7c6d, + 0x447c, 0x9744, 0x4597, 0x8945, 0xef88, 0x8fee, 0x3190, 0x4831, 0x8447, + 0xa183, 0x1da1, 0xd41d, 0x2dd4, 0x4f2e, 0xc94e, 0xcbc9, 0xc9cb, 
0x9ec9, + 0x319e, 0xd531, 0x20d5, 0x4021, 0xb23f, 0x29b2, 0xd828, 0xecd8, 0x5ded, + 0xfc5d, 0x4dfc, 0xd24d, 0x6bd2, 0x5f6b, 0xb35e, 0x7fb3, 0xee7e, 0x56ee, + 0xa657, 0x68a6, 0x8768, 0x7787, 0xb077, 0x4cb1, 0x764c, 0xb175, 0x7b1, + 0x3d07, 0x603d, 0x3560, 0x3e35, 0xb03d, 0xd6b0, 0xc8d6, 0xd8c8, 0x8bd8, + 0x3e8c, 0x303f, 0xd530, 0xf1d4, 0x42f1, 0xca42, 0xddca, 0x41dd, 0x3141, + 0x132, 0xe901, 0x8e9, 0xbe09, 0xe0bd, 0x2ce0, 0x862d, 0x3986, 0x9139, + 0x6d91, 0x6a6d, 0x8d6a, 0x1b8d, 0xac1b, 0xedab, 0x54ed, 0xc054, 0xcebf, + 0xc1ce, 0x5c2, 0x3805, 0x6038, 0x5960, 0xd359, 0xdd3, 0xbe0d, 0xafbd, + 0x6daf, 0x206d, 0x2c20, 0x862c, 0x8e86, 0xec8d, 0xa2ec, 0xa3a2, 0x51a3, + 0x8051, 0xfd7f, 0x91fd, 0xa292, 0xaf14, 0xeeae, 0x59ef, 0x535a, 0x8653, + 0x3986, 0x9539, 0xb895, 0xa0b8, 0x26a0, 0x2227, 0xc022, 0x77c0, 0xad77, + 0x46ad, 0xaa46, 0x60aa, 0x8560, 0x4785, 0xd747, 0x45d7, 0x2346, 0x5f23, + 0x25f, 0x1d02, 0x71d, 0x8206, 0xc82, 0x180c, 0x3018, 0x4b30, 0x4b, + 0x3001, 0x1230, 0x2d12, 0x8c2d, 0x148d, 0x4015, 0x5f3f, 0x3d5f, 0x6b3d, + 0x396b, 0x473a, 0xf746, 0x44f7, 0x8945, 0x3489, 0xcb34, 0x84ca, 0xd984, + 0xf0d9, 0xbcf0, 0x63bd, 0x3264, 0xf332, 0x45f3, 0x7346, 0x5673, 0xb056, + 0xd3b0, 0x4ad4, 0x184b, 0x7d18, 0x6c7d, 0xbb6c, 0xfeba, 0xe0fe, 0x10e1, + 0x5410, 0x2954, 0x9f28, 0x3a9f, 0x5a3a, 0xdb59, 0xbdc, 0xb40b, 0x1ab4, + 0x131b, 0x5d12, 0x6d5c, 0xe16c, 0xb0e0, 0x89b0, 0xba88, 0xbb, 0x3c01, + 0xe13b, 0x6fe1, 0x446f, 0xa344, 0x81a3, 0xfe81, 0xc7fd, 0x38c8, 0xb38, + 0x1a0b, 0x6d19, 0xf36c, 0x47f3, 0x6d48, 0xb76d, 0xd3b7, 0xd8d2, 0x52d9, + 0x4b53, 0xa54a, 0x34a5, 0xc534, 0x9bc4, 0xed9b, 0xbeed, 0x3ebe, 0x233e, + 0x9f22, 0x4a9f, 0x774b, 0x4577, 0xa545, 0x64a5, 0xb65, 0x870b, 0x487, + 0x9204, 0x5f91, 0xd55f, 0x35d5, 0x1a35, 0x71a, 0x7a07, 0x4e7a, 0xfc4e, + 0x1efc, 0x481f, 0x7448, 0xde74, 0xa7dd, 0x1ea7, 0xaa1e, 0xcfaa, 0xfbcf, + 0xedfb, 0x6eee, 0x386f, 0x4538, 0x6e45, 0xd96d, 0x11d9, 0x7912, 0x4b79, + 0x494b, 0x6049, 0xac5f, 0x65ac, 0x1366, 0x5913, 0xe458, 0x7ae4, 0x387a, + 0x3c38, 0xb03c, 0x76b0, 0x9376, 0xe193, 0x42e1, 0x7742, 0x6476, 0x3564, + 0x3c35, 0x6a3c, 0xcc69, 0x94cc, 0x5d95, 0xe5e, 0xee0d, 0x4ced, 0xce4c, + 0x52ce, 0xaa52, 0xdaaa, 0xe4da, 0x1de5, 0x4530, 0x5445, 0x3954, 0xb639, + 0x81b6, 0x7381, 0x1574, 0xc215, 0x10c2, 0x3f10, 0x6b3f, 0xe76b, 0x7be7, + 0xbc7b, 0xf7bb, 0x41f7, 0xcc41, 0x38cc, 0x4239, 0xa942, 0x4a9, 0xc504, + 0x7cc4, 0x437c, 0x6743, 0xea67, 0x8dea, 0xe88d, 0xd8e8, 0xdcd8, 0x17dd, + 0x5718, 0x958, 0xa609, 0x41a5, 0x5842, 0x159, 0x9f01, 0x269f, 0x5a26, + 0x405a, 0xc340, 0xb4c3, 0xd4b4, 0xf4d3, 0xf1f4, 0x39f2, 0xe439, 0x67e4, + 0x4168, 0xa441, 0xdda3, 0xdedd, 0x9df, 0xab0a, 0xa5ab, 0x9a6, 0xba09, + 0x9ab9, 0xad9a, 0x5ae, 0xe205, 0xece2, 0xecec, 0x14ed, 0xd614, 0x6bd5, + 0x916c, 0x3391, 0x6f33, 0x206f, 0x8020, 0x780, 0x7207, 0x2472, 0x8a23, + 0xb689, 0x3ab6, 0xf739, 0x97f6, 0xb097, 0xa4b0, 0xe6a4, 0x88e6, 0x2789, + 0xb28, 0x350b, 0x1f35, 0x431e, 0x1043, 0xc30f, 0x79c3, 0x379, 0x5703, + 0x3256, 0x4732, 0x7247, 0x9d72, 0x489d, 0xd348, 0xa4d3, 0x7ca4, 0xbf7b, + 0x45c0, 0x7b45, 0x337b, 0x4034, 0x843f, 0xd083, 0x35d0, 0x6335, 0x4d63, + 0xe14c, 0xcce0, 0xfecc, 0x35ff, 0x5636, 0xf856, 0xeef8, 0x2def, 0xfc2d, + 0x4fc, 0x6e04, 0xb66d, 0x78b6, 0xbb78, 0x3dbb, 0x9a3d, 0x839a, 0x9283, + 0x593, 0xd504, 0x23d5, 0x5424, 0xd054, 0x61d0, 0xdb61, 0x17db, 0x1f18, + 0x381f, 0x9e37, 0x679e, 0x1d68, 0x381d, 0x8038, 0x917f, 0x491, 0xbb04, + 0x23bb, 0x4124, 0xd41, 0xa30c, 0x8ba3, 0x8b8b, 0xc68b, 0xd2c6, 0xebd2, + 0x93eb, 0xbd93, 0x99bd, 0x1a99, 0xea19, 0x58ea, 0xcf58, 0x73cf, 0x1073, + 0x9e10, 0x139e, 0xea13, 
0xcde9, 0x3ecd, 0x883f, 0xf89, 0x180f, 0x2a18, + 0x212a, 0xce20, 0x73ce, 0xf373, 0x60f3, 0xad60, 0x4093, 0x8e40, 0xb98e, + 0xbfb9, 0xf1bf, 0x8bf1, 0x5e8c, 0xe95e, 0x14e9, 0x4e14, 0x1c4e, 0x7f1c, + 0xe77e, 0x6fe7, 0xf26f, 0x13f2, 0x8b13, 0xda8a, 0x5fda, 0xea5f, 0x4eea, + 0xa84f, 0x88a8, 0x1f88, 0x2820, 0x9728, 0x5a97, 0x3f5b, 0xb23f, 0x70b2, + 0x2c70, 0x232d, 0xf623, 0x4f6, 0x905, 0x7509, 0xd675, 0x28d7, 0x9428, + 0x3794, 0xf036, 0x2bf0, 0xba2c, 0xedb9, 0xd7ed, 0x59d8, 0xed59, 0x4ed, + 0xe304, 0x18e3, 0x5c19, 0x3d5c, 0x753d, 0x6d75, 0x956d, 0x7f95, 0xc47f, + 0x83c4, 0xa84, 0x2e0a, 0x5f2e, 0xb95f, 0x77b9, 0x6d78, 0xf46d, 0x1bf4, + 0xed1b, 0xd6ed, 0xe0d6, 0x5e1, 0x3905, 0x5638, 0xa355, 0x99a2, 0xbe99, + 0xb4bd, 0x85b4, 0x2e86, 0x542e, 0x6654, 0xd765, 0x73d7, 0x3a74, 0x383a, + 0x2638, 0x7826, 0x7677, 0x9a76, 0x7e99, 0x2e7e, 0xea2d, 0xa6ea, 0x8a7, + 0x109, 0x3300, 0xad32, 0x5fad, 0x465f, 0x2f46, 0xc62f, 0xd4c5, 0xad5, + 0xcb0a, 0x4cb, 0xb004, 0x7baf, 0xe47b, 0x92e4, 0x8e92, 0x638e, 0x1763, + 0xc17, 0xf20b, 0x1ff2, 0x8920, 0x5889, 0xcb58, 0xf8cb, 0xcaf8, 0x84cb, + 0x9f84, 0x8a9f, 0x918a, 0x4991, 0x8249, 0xff81, 0x46ff, 0x5046, 0x5f50, + 0x725f, 0xf772, 0x8ef7, 0xe08f, 0xc1e0, 0x1fc2, 0x9e1f, 0x8b9d, 0x108b, + 0x411, 0x2b04, 0xb02a, 0x1fb0, 0x1020, 0x7a0f, 0x587a, 0x8958, 0xb188, + 0xb1b1, 0x49b2, 0xb949, 0x7ab9, 0x917a, 0xfc91, 0xe6fc, 0x47e7, 0xbc47, + 0x8fbb, 0xea8e, 0x34ea, 0x2635, 0x1726, 0x9616, 0xc196, 0xa6c1, 0xf3a6, + 0x11f3, 0x4811, 0x3e48, 0xeb3e, 0xf7ea, 0x1bf8, 0xdb1c, 0x8adb, 0xe18a, + 0x42e1, 0x9d42, 0x5d9c, 0x6e5d, 0x286e, 0x4928, 0x9a49, 0xb09c, 0xa6b0, + 0x2a7, 0xe702, 0xf5e6, 0x9af5, 0xf9b, 0x810f, 0x8080, 0x180, 0x1702, + 0x5117, 0xa650, 0x11a6, 0x1011, 0x550f, 0xd554, 0xbdd5, 0x6bbe, 0xc66b, + 0xfc7, 0x5510, 0x5555, 0x7655, 0x177, 0x2b02, 0x6f2a, 0xb70, 0x9f0b, + 0xcf9e, 0xf3cf, 0x3ff4, 0xcb40, 0x8ecb, 0x768e, 0x5277, 0x8652, 0x9186, + 0x9991, 0x5099, 0xd350, 0x93d3, 0x6d94, 0xe6d, 0x530e, 0x3153, 0xa531, + 0x64a5, 0x7964, 0x7c79, 0x467c, 0x1746, 0x3017, 0x3730, 0x538, 0x5, + 0x1e00, 0x5b1e, 0x955a, 0xae95, 0x3eaf, 0xff3e, 0xf8ff, 0xb2f9, 0xa1b3, + 0xb2a1, 0x5b2, 0xad05, 0x7cac, 0x2d7c, 0xd32c, 0x80d2, 0x7280, 0x8d72, + 0x1b8e, 0x831b, 0xac82, 0xfdac, 0xa7fd, 0x15a8, 0xd614, 0xe0d5, 0x7be0, + 0xb37b, 0x61b3, 0x9661, 0x9d95, 0xc79d, 0x83c7, 0xd883, 0xead7, 0xceb, + 0xf60c, 0xa9f5, 0x19a9, 0xa019, 0x8f9f, 0xd48f, 0x3ad5, 0x853a, 0x985, + 0x5309, 0x6f52, 0x1370, 0x6e13, 0xa96d, 0x98a9, 0x5198, 0x9f51, 0xb69f, + 0xa1b6, 0x2ea1, 0x672e, 0x2067, 0x6520, 0xaf65, 0x6eaf, 0x7e6f, 0xee7e, + 0x17ef, 0xa917, 0xcea8, 0x9ace, 0xff99, 0x5dff, 0xdf5d, 0x38df, 0xa39, + 0x1c0b, 0xe01b, 0x46e0, 0xcb46, 0x90cb, 0xba90, 0x4bb, 0x9104, 0x9d90, + 0xc89c, 0xf6c8, 0x6cf6, 0x886c, 0x1789, 0xbd17, 0x70bc, 0x7e71, 0x17e, + 0x1f01, 0xa01f, 0xbaa0, 0x14bb, 0xfc14, 0x7afb, 0xa07a, 0x3da0, 0xbf3d, + 0x48bf, 0x8c48, 0x968b, 0x9d96, 0xfd9d, 0x96fd, 0x9796, 0x6b97, 0xd16b, + 0xf4d1, 0x3bf4, 0x253c, 0x9125, 0x6691, 0xc166, 0x34c1, 0x5735, 0x1a57, + 0xdc19, 0x77db, 0x8577, 0x4a85, 0x824a, 0x9182, 0x7f91, 0xfd7f, 0xb4c3, + 0xb5b4, 0xb3b5, 0x7eb3, 0x617e, 0x4e61, 0xa4f, 0x530a, 0x3f52, 0xa33e, + 0x34a3, 0x9234, 0xf091, 0xf4f0, 0x1bf5, 0x311b, 0x9631, 0x6a96, 0x386b, + 0x1d39, 0xe91d, 0xe8e9, 0x69e8, 0x426a, 0xee42, 0x89ee, 0x368a, 0x2837, + 0x7428, 0x5974, 0x6159, 0x1d62, 0x7b1d, 0xf77a, 0x7bf7, 0x6b7c, 0x696c, + 0xf969, 0x4cf9, 0x714c, 0x4e71, 0x6b4e, 0x256c, 0x6e25, 0xe96d, 0x94e9, + 0x8f94, 0x3e8f, 0x343e, 0x4634, 0xb646, 0x97b5, 0x8997, 0xe8a, 0x900e, + 0x8090, 0xfd80, 0xa0fd, 0x16a1, 0xf416, 0xebf4, 0x95ec, 
0x1196, 0x8911, + 0x3d89, 0xda3c, 0x9fd9, 0xd79f, 0x4bd7, 0x214c, 0x3021, 0x4f30, 0x994e, + 0x5c99, 0x6f5d, 0x326f, 0xab31, 0x6aab, 0xe969, 0x90e9, 0x1190, 0xff10, + 0xa2fe, 0xe0a2, 0x66e1, 0x4067, 0x9e3f, 0x2d9e, 0x712d, 0x8170, 0xd180, + 0xffd1, 0x25ff, 0x3826, 0x2538, 0x5f24, 0xc45e, 0x1cc4, 0xdf1c, 0x93df, + 0xc793, 0x80c7, 0x2380, 0xd223, 0x7ed2, 0xfc7e, 0x22fd, 0x7422, 0x1474, + 0xb714, 0x7db6, 0x857d, 0xa85, 0xa60a, 0x88a6, 0x4289, 0x7842, 0xc278, + 0xf7c2, 0xcdf7, 0x84cd, 0xae84, 0x8cae, 0xb98c, 0x1aba, 0x4d1a, 0x884c, + 0x4688, 0xcc46, 0xd8cb, 0x2bd9, 0xbe2b, 0xa2be, 0x72a2, 0xf772, 0xd2f6, + 0x75d2, 0xc075, 0xa3c0, 0x63a3, 0xae63, 0x8fae, 0x2a90, 0x5f2a, 0xef5f, + 0x5cef, 0xa05c, 0x89a0, 0x5e89, 0x6b5e, 0x736b, 0x773, 0x9d07, 0xe99c, + 0x27ea, 0x2028, 0xc20, 0x980b, 0x4797, 0x2848, 0x9828, 0xc197, 0x48c2, + 0x2449, 0x7024, 0x570, 0x3e05, 0xd3e, 0xf60c, 0xbbf5, 0x69bb, 0x3f6a, + 0x740, 0xf006, 0xe0ef, 0xbbe0, 0xadbb, 0x56ad, 0xcf56, 0xbfce, 0xa9bf, + 0x205b, 0x6920, 0xae69, 0x50ae, 0x2050, 0xf01f, 0x27f0, 0x9427, 0x8993, + 0x8689, 0x4087, 0x6e40, 0xb16e, 0xa1b1, 0xe8a1, 0x87e8, 0x6f88, 0xfe6f, + 0x4cfe, 0xe94d, 0xd5e9, 0x47d6, 0x3148, 0x5f31, 0xc35f, 0x13c4, 0xa413, + 0x5a5, 0x2405, 0xc223, 0x66c2, 0x3667, 0x5e37, 0x5f5e, 0x2f5f, 0x8c2f, + 0xe48c, 0xd0e4, 0x4d1, 0xd104, 0xe4d0, 0xcee4, 0xfcf, 0x480f, 0xa447, + 0x5ea4, 0xff5e, 0xbefe, 0x8dbe, 0x1d8e, 0x411d, 0x1841, 0x6918, 0x5469, + 0x1155, 0xc611, 0xaac6, 0x37ab, 0x2f37, 0xca2e, 0x87ca, 0xbd87, 0xabbd, + 0xb3ab, 0xcb4, 0xce0c, 0xfccd, 0xa5fd, 0x72a5, 0xf072, 0x83f0, 0xfe83, + 0x97fd, 0xc997, 0xb0c9, 0xadb0, 0xe6ac, 0x88e6, 0x1088, 0xbe10, 0x16be, + 0xa916, 0xa3a8, 0x46a3, 0x5447, 0xe953, 0x84e8, 0x2085, 0xa11f, 0xfa1, + 0xdd0f, 0xbedc, 0x5abe, 0x805a, 0xc97f, 0x6dc9, 0x826d, 0x4a82, 0x934a, + 0x5293, 0xd852, 0xd3d8, 0xadd3, 0xf4ad, 0xf3f4, 0xfcf3, 0xfefc, 0xcafe, + 0xb7ca, 0x3cb8, 0xa13c, 0x18a1, 0x1418, 0xea13, 0x91ea, 0xf891, 0x53f8, + 0xa254, 0xe9a2, 0x87ea, 0x4188, 0x1c41, 0xdc1b, 0xf5db, 0xcaf5, 0x45ca, + 0x6d45, 0x396d, 0xde39, 0x90dd, 0x1e91, 0x1e, 0x7b00, 0x6a7b, 0xa46a, + 0xc9a3, 0x9bc9, 0x389b, 0x1139, 0x5211, 0x1f52, 0xeb1f, 0xabeb, 0x48ab, + 0x9348, 0xb392, 0x17b3, 0x1618, 0x5b16, 0x175b, 0xdc17, 0xdedb, 0x1cdf, + 0xeb1c, 0xd1ea, 0x4ad2, 0xd4b, 0xc20c, 0x24c2, 0x7b25, 0x137b, 0x8b13, + 0x618b, 0xa061, 0xff9f, 0xfffe, 0x72ff, 0xf572, 0xe2f5, 0xcfe2, 0xd2cf, + 0x75d3, 0x6a76, 0xc469, 0x1ec4, 0xfc1d, 0x59fb, 0x455a, 0x7a45, 0xa479, + 0xb7a4 +}; + static u8 tmp_buf[TEST_BUFLEN]; #define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum)) @@ -338,10 +575,59 @@ static void test_csum_no_carry_inputs(struct kunit *test) } } +static void test_ip_fast_csum(struct kunit *test) +{ + __sum16 csum_result; + u16 expected; + + for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) { + for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) { + csum_result = ip_fast_csum(random_buf + index, len); + expected = + expected_fast_csum[(len - IPv4_MIN_WORDS) * + NUM_IP_FAST_CSUM_TESTS + + index]; + CHECK_EQ(to_sum16(expected), csum_result); + } + } +} + +static void test_csum_ipv6_magic(struct kunit *test) +{ + const struct in6_addr *saddr; + const struct in6_addr *daddr; + unsigned int len; + unsigned char proto; + __wsum csum; + + if (!IS_ENABLED(CONFIG_NET)) + return; + + const int daddr_offset = sizeof(struct in6_addr); + const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr); + const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) + + sizeof(int); + const int csum_offset = 
sizeof(struct in6_addr) + sizeof(struct in6_addr) + + sizeof(int) + sizeof(char); + + for (int i = 0; i < NUM_IPv6_TESTS; i++) { + saddr = (const struct in6_addr *)(random_buf + i); + daddr = (const struct in6_addr *)(random_buf + i + + daddr_offset); + len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset)); + proto = *(random_buf + i + proto_offset); + csum = *(__wsum *)(random_buf + i + csum_offset); + CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]), + csum_ipv6_magic(saddr, daddr, len, proto, csum)); + } +} + static struct kunit_case __refdata checksum_test_cases[] = { KUNIT_CASE(test_csum_fixed_random_inputs), KUNIT_CASE(test_csum_all_carry_inputs), KUNIT_CASE(test_csum_no_carry_inputs), + KUNIT_CASE(test_ip_fast_csum), + KUNIT_CASE(test_csum_ipv6_magic), {} }; diff --git a/lib/closure.c b/lib/closure.c index 0855e698ced1..c16540552d61 100644 --- a/lib/closure.c +++ b/lib/closure.c @@ -21,6 +21,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR)); if (!r) { + smp_acquire__after_ctrl_dep(); + + cl->closure_get_happened = false; + if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) { atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); @@ -32,7 +36,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) closure_debug_destroy(cl); if (destructor) - destructor(cl); + destructor(&cl->work); if (parent) closure_put(parent); @@ -43,7 +47,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) /* For clearing flags with the same atomic op as a put */ void closure_sub(struct closure *cl, int v) { - closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining)); + closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining)); } EXPORT_SYMBOL(closure_sub); @@ -52,7 +56,7 @@ EXPORT_SYMBOL(closure_sub); */ void closure_put(struct closure *cl) { - closure_put_after_sub(cl, atomic_dec_return(&cl->remaining)); + closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining)); } EXPORT_SYMBOL(closure_put); @@ -90,6 +94,7 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl) if (atomic_read(&cl->remaining) & CLOSURE_WAITING) return false; + cl->closure_get_happened = true; closure_set_waiting(cl, _RET_IP_); atomic_add(CLOSURE_WAITING + 1, &cl->remaining); llist_add(&cl->list, &waitlist->list); @@ -103,8 +108,9 @@ struct closure_syncer { int done; }; -static void closure_sync_fn(struct closure *cl) +static CLOSURE_CALLBACK(closure_sync_fn) { + struct closure *cl = container_of(ws, struct closure, work); struct closure_syncer *s = cl->s; struct task_struct *p; diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c index d4572dbc9145..705b82736be0 100644 --- a/lib/cmdline_kunit.c +++ b/lib/cmdline_kunit.c @@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in, n, e[0], r[0]); p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0])); - KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r); + KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r); } static void cmdline_test_range(struct kunit *test) diff --git a/lib/cmpxchg-emu.c b/lib/cmpxchg-emu.c new file mode 100644 index 000000000000..27f6f97cb60d --- /dev/null +++ b/lib/cmpxchg-emu.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Emulated 1-byte cmpxchg operation for architectures lacking direct + * support for this size. This is implemented in terms of 4-byte cmpxchg + * operations. 
+ * + * Copyright (C) 2024 Paul E. McKenney. + */ + +#include <linux/types.h> +#include <linux/export.h> +#include <linux/instrumented.h> +#include <linux/atomic.h> +#include <linux/panic.h> +#include <linux/bug.h> +#include <asm-generic/rwonce.h> +#include <linux/cmpxchg-emu.h> + +union u8_32 { + u8 b[4]; + u32 w; +}; + +/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */ +uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new) +{ + u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3); + int i = ((uintptr_t)p) & 0x3; + union u8_32 old32; + union u8_32 new32; + u32 ret; + + ret = READ_ONCE(*p32); + do { + old32.w = ret; + if (old32.b[i] != old) + return old32.b[i]; + new32.w = old32.w; + new32.b[i] = new; + instrument_atomic_read_write(p, 1); + ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above. + } while (ret != old32.w); + return old; +} +EXPORT_SYMBOL_GPL(cmpxchg_emu_u8); diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c index d1a7d29d2ac9..9cddf35d3b66 100644 --- a/lib/crc-ccitt.c +++ b/lib/crc-ccitt.c @@ -49,46 +49,6 @@ u16 const crc_ccitt_table[256] = { }; EXPORT_SYMBOL(crc_ccitt_table); -/* - * Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE - * Reflected bits order, does not augment final value. - */ -u16 const crc_ccitt_false_table[256] = { - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7, - 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6, - 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485, - 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4, - 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC, - 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B, - 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12, - 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A, - 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41, - 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49, - 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70, - 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78, - 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F, - 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E, - 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D, - 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C, - 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3, - 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A, - 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92, - 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, - 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1, - 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8, - 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0 -}; -EXPORT_SYMBOL(crc_ccitt_false_table); - /** * crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data * buffer @@ 
-104,20 +64,5 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len) } EXPORT_SYMBOL(crc_ccitt); -/** - * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant) - * for the data buffer - * @crc: previous CRC value - * @buffer: data pointer - * @len: number of bytes in the buffer - */ -u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len) -{ - while (len--) - crc = crc_ccitt_false_byte(crc, *buffer++); - return crc; -} -EXPORT_SYMBOL(crc_ccitt_false); - MODULE_DESCRIPTION("CRC-CCITT calculations"); MODULE_LICENSE("GPL"); diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 45436bfc6dff..b01253cac70a 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -8,6 +8,11 @@ config CRYPTO_LIB_UTILS config CRYPTO_LIB_AES tristate +config CRYPTO_LIB_AESCFB + tristate + select CRYPTO_LIB_AES + select CRYPTO_LIB_UTILS + config CRYPTO_LIB_AESGCM tristate select CRYPTO_LIB_AES diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 8d1446c2be71..969baab8c805 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -10,6 +10,9 @@ obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o libaes-y := aes.o +obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o +libaescfb-y := aescfb.o + obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o libaesgcm-y := aesgcm.o diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c new file mode 100644 index 000000000000..749dc1258a44 --- /dev/null +++ b/lib/crypto/aescfb.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Minimal library implementation of AES in CFB mode + * + * Copyright 2023 Google LLC + */ + +#include <linux/module.h> + +#include <crypto/algapi.h> +#include <crypto/aes.h> + +#include <asm/irqflags.h> + +static void aescfb_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst, + const void *src) +{ + unsigned long flags; + + /* + * In AES-CFB, the AES encryption operates on known 'plaintext' (the IV + * and ciphertext), making it susceptible to timing attacks on the + * encryption key. The AES library already mitigates this risk to some + * extent by pulling the entire S-box into the caches before doing any + * substitutions, but this strategy is more effective when running with + * interrupts disabled. + */ + local_irq_save(flags); + aes_encrypt(ctx, dst, src); + local_irq_restore(flags); +} + +/** + * aescfb_encrypt - Perform AES-CFB encryption on a block of data + * + * @ctx: The AES-CFB key schedule + * @dst: Pointer to the ciphertext output buffer + * @src: Pointer the plaintext (may equal @dst for encryption in place) + * @len: The size in bytes of the plaintext and ciphertext. + * @iv: The initialization vector (IV) to use for this block of data + */ +void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]) +{ + u8 ks[AES_BLOCK_SIZE]; + const u8 *v = iv; + + while (len > 0) { + aescfb_encrypt_block(ctx, ks, v); + crypto_xor_cpy(dst, src, ks, min(len, AES_BLOCK_SIZE)); + v = dst; + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + len -= AES_BLOCK_SIZE; + } + + memzero_explicit(ks, sizeof(ks)); +} +EXPORT_SYMBOL(aescfb_encrypt); + +/** + * aescfb_decrypt - Perform AES-CFB decryption on a block of data + * + * @ctx: The AES-CFB key schedule + * @dst: Pointer to the plaintext output buffer + * @src: Pointer the ciphertext (may equal @dst for decryption in place) + * @len: The size in bytes of the plaintext and ciphertext. 
+ * @iv: The initialization vector (IV) to use for this block of data + */ +void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]) +{ + u8 ks[2][AES_BLOCK_SIZE]; + + aescfb_encrypt_block(ctx, ks[0], iv); + + for (int i = 0; len > 0; i ^= 1) { + if (len > AES_BLOCK_SIZE) + /* + * Generate the keystream for the next block before + * performing the XOR, as that may update in place and + * overwrite the ciphertext. + */ + aescfb_encrypt_block(ctx, ks[!i], src); + + crypto_xor_cpy(dst, src, ks[i], min(len, AES_BLOCK_SIZE)); + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + len -= AES_BLOCK_SIZE; + } + + memzero_explicit(ks, sizeof(ks)); +} +EXPORT_SYMBOL(aescfb_decrypt); + +MODULE_DESCRIPTION("Generic AES-CFB library"); +MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); +MODULE_LICENSE("GPL"); + +#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS + +/* + * Test code below. Vectors taken from crypto/testmgr.h + */ + +static struct { + u8 ptext[64]; + u8 ctext[64]; + + u8 key[AES_MAX_KEY_SIZE]; + u8 iv[AES_BLOCK_SIZE]; + + int klen; + int len; +} const aescfb_tv[] __initconst = { + { /* From NIST SP800-38A */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f" + "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b" + "\x26\x75\x1f\x67\xa3\xcb\xb1\x40" + "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf" + "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e" + "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6", + .len = 64, + }, { + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 24, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab" + "\x34\xc2\x59\x09\xc9\x9a\x41\x74" + "\x67\xce\x7f\x7f\x81\x17\x36\x21" + "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a" + "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1" + "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9" + "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0" + "\x42\xae\x8f\xba\x58\x4b\x09\xff", + .len = 64, + }, { + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 32, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b" + "\x7e\xcd\x84\x86\x98\x5d\x38\x60" + "\x39\xff\xed\x14\x3b\x28\xb1\xc8" + "\x32\x11\x3c\x63\x31\xe5\x40\x7b" + 
"\xdf\x10\x13\x24\x15\xe5\x4b\x92" + "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9" + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" + "\x20\x31\x62\x3d\x55\xb1\xe4\x71", + .len = 64, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, + }, +}; + +static int __init libaescfb_init(void) +{ + for (int i = 0; i < ARRAY_SIZE(aescfb_tv); i++) { + struct crypto_aes_ctx ctx; + u8 buf[64]; + + if (aes_expandkey(&ctx, aescfb_tv[i].key, aescfb_tv[i].klen)) { + pr_err("aes_expandkey() failed on vector %d\n", i); + return -ENODEV; + } + + aescfb_encrypt(&ctx, buf, aescfb_tv[i].ptext, aescfb_tv[i].len, + aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) { + pr_err("aescfb_encrypt() #1 failed on vector %d\n", i); + return -ENODEV; + } + + /* decrypt in place */ + aescfb_decrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ptext, aescfb_tv[i].len)) { + pr_err("aescfb_decrypt() failed on vector %d\n", i); + return -ENODEV; + } + + /* encrypt in place */ + aescfb_encrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) { + pr_err("aescfb_encrypt() #2 failed on vector %d\n", i); + + return -ENODEV; + } + + } + return 0; +} +module_init(libaescfb_init); + +static void __exit libaescfb_exit(void) +{ +} +module_exit(libaescfb_exit); +#endif diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c index c632d6e17af8..6bba6473fdf3 100644 --- a/lib/crypto/aesgcm.c +++ b/lib/crypto/aesgcm.c @@ -73,6 +73,19 @@ static void aesgcm_ghash(be128 *ghash, const be128 *key, const void *src, } } +/** + * aesgcm_mac - Generates the authentication tag using AES-GCM algorithm. + * @ctx: The data structure that will hold the AES-GCM key schedule + * @src: The input source data. + * @src_len: Length of the source data. + * @assoc: Points to the associated data. + * @assoc_len: Length of the associated data values. + * @ctr: Points to the counter value. + * @authtag: The output buffer for the authentication tag. + * + * It takes in the AES-GCM context, source data, associated data, counter value, + * and an output buffer for the authentication tag. + */ static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len, const u8 *assoc, int assoc_len, __be32 *ctr, u8 *authtag) { diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c index 40f5908e57a4..e16dca1e23d5 100644 --- a/lib/crypto/mpi/ec.c +++ b/lib/crypto/mpi/ec.c @@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model, ctx->a = mpi_copy(a); ctx->b = mpi_copy(b); + ctx->d = NULL; + ctx->t.two_inv_p = NULL; + ctx->t.p_barrett = use_barrett > 0 ? 
mpi_barrett_init(ctx->p, 0) : NULL; mpi_ec_get_reset(ctx); diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 2a8e9d63fbe3..fb12a9bacd2f 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -620,9 +620,8 @@ static void debug_objects_fill_pool(void) static void __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) { - enum debug_obj_state state; + struct debug_obj *obj, o; struct debug_bucket *db; - struct debug_obj *obj; unsigned long flags; debug_objects_fill_pool(); @@ -643,24 +642,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: obj->state = ODEBUG_STATE_INIT; - break; - - case ODEBUG_STATE_ACTIVE: - state = obj->state; - raw_spin_unlock_irqrestore(&db->lock, flags); - debug_print_object(obj, "init"); - debug_object_fixup(descr->fixup_init, addr, state); - return; - - case ODEBUG_STATE_DESTROYED: raw_spin_unlock_irqrestore(&db->lock, flags); - debug_print_object(obj, "init"); return; default: break; } + o = *obj; raw_spin_unlock_irqrestore(&db->lock, flags); + debug_print_object(&o, "init"); + + if (o.state == ODEBUG_STATE_ACTIVE) + debug_object_fixup(descr->fixup_init, addr, o.state); } /** @@ -701,11 +694,9 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack); int debug_object_activate(void *addr, const struct debug_obj_descr *descr) { struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; - enum debug_obj_state state; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; - int ret; if (!debug_objects_enabled) return 0; @@ -717,49 +708,38 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr) raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object_or_alloc(addr, db, descr, false, true); - if (likely(!IS_ERR_OR_NULL(obj))) { - bool print_object = false; - + if (unlikely(!obj)) { + raw_spin_unlock_irqrestore(&db->lock, flags); + debug_objects_oom(); + return 0; + } else if (likely(!IS_ERR(obj))) { switch (obj->state) { - case ODEBUG_STATE_INIT: - case ODEBUG_STATE_INACTIVE: - obj->state = ODEBUG_STATE_ACTIVE; - ret = 0; - break; - case ODEBUG_STATE_ACTIVE: - state = obj->state; - raw_spin_unlock_irqrestore(&db->lock, flags); - debug_print_object(obj, "activate"); - ret = debug_object_fixup(descr->fixup_activate, addr, state); - return ret ? 0 : -EINVAL; - case ODEBUG_STATE_DESTROYED: - print_object = true; - ret = -EINVAL; + o = *obj; break; + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_ACTIVE; + fallthrough; default: - ret = 0; - break; + raw_spin_unlock_irqrestore(&db->lock, flags); + return 0; } - raw_spin_unlock_irqrestore(&db->lock, flags); - if (print_object) - debug_print_object(obj, "activate"); - return ret; } raw_spin_unlock_irqrestore(&db->lock, flags); + debug_print_object(&o, "activate"); - /* If NULL the allocation has hit OOM */ - if (!obj) { - debug_objects_oom(); - return 0; + switch (o.state) { + case ODEBUG_STATE_ACTIVE: + case ODEBUG_STATE_NOTAVAILABLE: + if (debug_object_fixup(descr->fixup_activate, addr, o.state)) + return 0; + fallthrough; + default: + return -EINVAL; } - - /* Object is neither static nor tracked. It's not initialized */ - debug_print_object(&o, "activate"); - ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE); - return ret ? 
0 : -EINVAL; } EXPORT_SYMBOL_GPL(debug_object_activate); @@ -770,10 +750,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate); */ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; - bool print_object = false; if (!debug_objects_enabled) return; @@ -785,33 +765,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) obj = lookup_object(addr, db); if (obj) { switch (obj->state) { + case ODEBUG_STATE_DESTROYED: + break; case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: case ODEBUG_STATE_ACTIVE: - if (!obj->astate) - obj->state = ODEBUG_STATE_INACTIVE; - else - print_object = true; - break; - - case ODEBUG_STATE_DESTROYED: - print_object = true; - break; + if (obj->astate) + break; + obj->state = ODEBUG_STATE_INACTIVE; + fallthrough; default: - break; + raw_spin_unlock_irqrestore(&db->lock, flags); + return; } + o = *obj; } raw_spin_unlock_irqrestore(&db->lock, flags); - if (!obj) { - struct debug_obj o = { .object = addr, - .state = ODEBUG_STATE_NOTAVAILABLE, - .descr = descr }; - - debug_print_object(&o, "deactivate"); - } else if (print_object) { - debug_print_object(obj, "deactivate"); - } + debug_print_object(&o, "deactivate"); } EXPORT_SYMBOL_GPL(debug_object_deactivate); @@ -822,11 +793,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate); */ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr) { - enum debug_obj_state state; + struct debug_obj *obj, o; struct debug_bucket *db; - struct debug_obj *obj; unsigned long flags; - bool print_object = false; if (!debug_objects_enabled) return; @@ -836,32 +805,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr) raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); - if (!obj) - goto out_unlock; + if (!obj) { + raw_spin_unlock_irqrestore(&db->lock, flags); + return; + } switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + case ODEBUG_STATE_DESTROYED: + break; case ODEBUG_STATE_NONE: case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: obj->state = ODEBUG_STATE_DESTROYED; - break; - case ODEBUG_STATE_ACTIVE: - state = obj->state; + fallthrough; + default: raw_spin_unlock_irqrestore(&db->lock, flags); - debug_print_object(obj, "destroy"); - debug_object_fixup(descr->fixup_destroy, addr, state); return; - - case ODEBUG_STATE_DESTROYED: - print_object = true; - break; - default: - break; } -out_unlock: + + o = *obj; raw_spin_unlock_irqrestore(&db->lock, flags); - if (print_object) - debug_print_object(obj, "destroy"); + debug_print_object(&o, "destroy"); + + if (o.state == ODEBUG_STATE_ACTIVE) + debug_object_fixup(descr->fixup_destroy, addr, o.state); } EXPORT_SYMBOL_GPL(debug_object_destroy); @@ -872,9 +840,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy); */ void debug_object_free(void *addr, const struct debug_obj_descr *descr) { - enum debug_obj_state state; + struct debug_obj *obj, o; struct debug_bucket *db; - struct debug_obj *obj; unsigned long flags; if (!debug_objects_enabled) @@ -885,24 +852,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr) raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); - if (!obj) - goto out_unlock; + if (!obj) { + raw_spin_unlock_irqrestore(&db->lock, flags); + return; + } switch (obj->state) { case ODEBUG_STATE_ACTIVE: - state = obj->state; - raw_spin_unlock_irqrestore(&db->lock, flags); - 
debug_print_object(obj, "free"); - debug_object_fixup(descr->fixup_free, addr, state); - return; + break; default: hlist_del(&obj->node); raw_spin_unlock_irqrestore(&db->lock, flags); free_object(obj); return; } -out_unlock: + + o = *obj; raw_spin_unlock_irqrestore(&db->lock, flags); + debug_print_object(&o, "free"); + + debug_object_fixup(descr->fixup_free, addr, o.state); } EXPORT_SYMBOL_GPL(debug_object_free); @@ -954,10 +923,10 @@ void debug_object_active_state(void *addr, const struct debug_obj_descr *descr, unsigned int expect, unsigned int next) { + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; - bool print_object = false; if (!debug_objects_enabled) return; @@ -970,28 +939,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr, if (obj) { switch (obj->state) { case ODEBUG_STATE_ACTIVE: - if (obj->astate == expect) - obj->astate = next; - else - print_object = true; - break; - + if (obj->astate != expect) + break; + obj->astate = next; + raw_spin_unlock_irqrestore(&db->lock, flags); + return; default: - print_object = true; break; } + o = *obj; } raw_spin_unlock_irqrestore(&db->lock, flags); - if (!obj) { - struct debug_obj o = { .object = addr, - .state = ODEBUG_STATE_NOTAVAILABLE, - .descr = descr }; - - debug_print_object(&o, "active_state"); - } else if (print_object) { - debug_print_object(obj, "active_state"); - } + debug_print_object(&o, "active_state"); } EXPORT_SYMBOL_GPL(debug_object_active_state); @@ -999,12 +959,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state); static void __debug_check_no_obj_freed(const void *address, unsigned long size) { unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; - const struct debug_obj_descr *descr; - enum debug_obj_state state; + int cnt, objs_checked = 0; + struct debug_obj *obj, o; struct debug_bucket *db; struct hlist_node *tmp; - struct debug_obj *obj; - int cnt, objs_checked = 0; saddr = (unsigned long) address; eaddr = saddr + size; @@ -1026,12 +984,10 @@ repeat: switch (obj->state) { case ODEBUG_STATE_ACTIVE: - descr = obj->descr; - state = obj->state; + o = *obj; raw_spin_unlock_irqrestore(&db->lock, flags); - debug_print_object(obj, "free"); - debug_object_fixup(descr->fixup_free, - (void *) oaddr, state); + debug_print_object(&o, "free"); + debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state); goto repeat; default: hlist_del(&obj->node); diff --git a/lib/devres.c b/lib/devres.c index c44f104b58d5..fe0c63caeb68 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/device.h> #include <linux/err.h> -#include <linux/pci.h> #include <linux/io.h> #include <linux/gfp.h> #include <linux/export.h> @@ -311,212 +311,6 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) EXPORT_SYMBOL(devm_ioport_unmap); #endif /* CONFIG_HAS_IOPORT_MAP */ -#ifdef CONFIG_PCI -/* - * PCI iomap devres - */ -#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS - -struct pcim_iomap_devres { - void __iomem *table[PCIM_IOMAP_MAX]; -}; - -static void pcim_iomap_release(struct device *gendev, void *res) -{ - struct pci_dev *dev = to_pci_dev(gendev); - struct pcim_iomap_devres *this = res; - int i; - - for (i = 0; i < PCIM_IOMAP_MAX; i++) - if (this->table[i]) - pci_iounmap(dev, this->table[i]); -} - -/** - * pcim_iomap_table - access iomap allocation table - * @pdev: PCI device to access iomap table for - * - * Access iomap allocation table for @dev. 
If iomap table doesn't - * exist and @pdev is managed, it will be allocated. All iomaps - * recorded in the iomap table are automatically unmapped on driver - * detach. - * - * This function might sleep when the table is first allocated but can - * be safely called without context and guaranteed to succeed once - * allocated. - */ -void __iomem * const *pcim_iomap_table(struct pci_dev *pdev) -{ - struct pcim_iomap_devres *dr, *new_dr; - - dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL); - if (dr) - return dr->table; - - new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL, - dev_to_node(&pdev->dev)); - if (!new_dr) - return NULL; - dr = devres_get(&pdev->dev, new_dr, NULL, NULL); - return dr->table; -} -EXPORT_SYMBOL(pcim_iomap_table); - -/** - * pcim_iomap - Managed pcim_iomap() - * @pdev: PCI device to iomap for - * @bar: BAR to iomap - * @maxlen: Maximum length of iomap - * - * Managed pci_iomap(). Map is automatically unmapped on driver - * detach. - */ -void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) -{ - void __iomem **tbl; - - BUG_ON(bar >= PCIM_IOMAP_MAX); - - tbl = (void __iomem **)pcim_iomap_table(pdev); - if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ - return NULL; - - tbl[bar] = pci_iomap(pdev, bar, maxlen); - return tbl[bar]; -} -EXPORT_SYMBOL(pcim_iomap); - -/** - * pcim_iounmap - Managed pci_iounmap() - * @pdev: PCI device to iounmap for - * @addr: Address to unmap - * - * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap(). - */ -void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr) -{ - void __iomem **tbl; - int i; - - pci_iounmap(pdev, addr); - - tbl = (void __iomem **)pcim_iomap_table(pdev); - BUG_ON(!tbl); - - for (i = 0; i < PCIM_IOMAP_MAX; i++) - if (tbl[i] == addr) { - tbl[i] = NULL; - return; - } - WARN_ON(1); -} -EXPORT_SYMBOL(pcim_iounmap); - -/** - * pcim_iomap_regions - Request and iomap PCI BARs - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to request and iomap - * @name: Name used when requesting regions - * - * Request and iomap regions specified by @mask. - */ -int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name) -{ - void __iomem * const *iomap; - int i, rc; - - iomap = pcim_iomap_table(pdev); - if (!iomap) - return -ENOMEM; - - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - unsigned long len; - - if (!(mask & (1 << i))) - continue; - - rc = -EINVAL; - len = pci_resource_len(pdev, i); - if (!len) - goto err_inval; - - rc = pci_request_region(pdev, i, name); - if (rc) - goto err_inval; - - rc = -ENOMEM; - if (!pcim_iomap(pdev, i, 0)) - goto err_region; - } - - return 0; - - err_region: - pci_release_region(pdev, i); - err_inval: - while (--i >= 0) { - if (!(mask & (1 << i))) - continue; - pcim_iounmap(pdev, iomap[i]); - pci_release_region(pdev, i); - } - - return rc; -} -EXPORT_SYMBOL(pcim_iomap_regions); - -/** - * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to iomap - * @name: Name used when requesting regions - * - * Request all PCI BARs and iomap regions specified by @mask. 
- */ -int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, - const char *name) -{ - int request_mask = ((1 << 6) - 1) & ~mask; - int rc; - - rc = pci_request_selected_regions(pdev, request_mask, name); - if (rc) - return rc; - - rc = pcim_iomap_regions(pdev, mask, name); - if (rc) - pci_release_selected_regions(pdev, request_mask); - return rc; -} -EXPORT_SYMBOL(pcim_iomap_regions_request_all); - -/** - * pcim_iounmap_regions - Unmap and release PCI BARs - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to unmap and release - * - * Unmap and release regions specified by @mask. - */ -void pcim_iounmap_regions(struct pci_dev *pdev, int mask) -{ - void __iomem * const *iomap; - int i; - - iomap = pcim_iomap_table(pdev); - if (!iomap) - return; - - for (i = 0; i < PCIM_IOMAP_MAX; i++) { - if (!(mask & (1 << i))) - continue; - - pcim_iounmap(pdev, iomap[i]); - pci_release_region(pdev, i); - } -} -EXPORT_SYMBOL(pcim_iounmap_regions); -#endif /* CONFIG_PCI */ - static void devm_arch_phys_ac_add_release(struct device *dev, void *res) { arch_phys_wc_del(*((int *)res)); diff --git a/lib/dhry_1.c b/lib/dhry_1.c index 08edbbb19f57..ca6c87232c58 100644 --- a/lib/dhry_1.c +++ b/lib/dhry_1.c @@ -277,7 +277,7 @@ int dhry(int n) dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING"); dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING"); - User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time)); + User_Time = ktime_ms_delta(End_Time, Begin_Time); kfree(Ptr_Glob); kfree(Next_Ptr_Glob); diff --git a/lib/dhry_run.c b/lib/dhry_run.c index f15ac666e9d3..e6a279dabf84 100644 --- a/lib/dhry_run.c +++ b/lib/dhry_run.c @@ -10,7 +10,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/mutex.h> #include <linux/smp.h> #define DHRY_VAX 1757 diff --git a/lib/dim/Makefile b/lib/dim/Makefile index 1d6858a108cb..c4cc4026c451 100644 --- a/lib/dim/Makefile +++ b/lib/dim/Makefile @@ -2,6 +2,6 @@ # DIM Dynamic Interrupt Moderation library # -obj-$(CONFIG_DIMLIB) += dim.o +obj-$(CONFIG_DIMLIB) += dimlib.o -dim-y := dim.o net_dim.o rdma_dim.o +dimlib-objs := dim.o net_dim.o rdma_dim.o diff --git a/lib/dim/dim.c b/lib/dim/dim.c index e89aaf07bde5..83b65ac74d73 100644 --- a/lib/dim/dim.c +++ b/lib/dim/dim.c @@ -82,3 +82,6 @@ bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, return true; } EXPORT_SYMBOL(dim_calc_stats); + +MODULE_DESCRIPTION("Dynamic Interrupt Moderation (DIM) library"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 83471e81501a..222c6d6c8281 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -96,15 +96,25 @@ static void __dump_stack(const char *log_lvl) */ asmlinkage __visible void dump_stack_lvl(const char *log_lvl) { + bool in_panic = this_cpu_in_panic(); unsigned long flags; /* * Permit this cpu to perform nested stack dumps while serialising - * against other CPUs + * against other CPUs, unless this CPU is in panic. + * + * When in panic, non-panic CPUs are not permitted to store new + * printk messages so there is no need to synchronize the output. + * This avoids potential deadlock in panic() if another CPU is + * holding and unable to release the printk_cpu_sync. 
*/ - printk_cpu_sync_get_irqsave(flags); + if (!in_panic) + printk_cpu_sync_get_irqsave(flags); + __dump_stack(log_lvl); - printk_cpu_sync_put_irqrestore(flags); + + if (!in_panic) + printk_cpu_sync_put_irqrestore(flags); } EXPORT_SYMBOL(dump_stack_lvl); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 6fba6423cc10..f2c5e7910bb1 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -302,7 +302,11 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) } else { for (end = buf; *end && !isspace(*end); end++) ; - BUG_ON(end == buf); + if (end == buf) { + pr_err("parse err after word:%d=%s\n", nwords, + nwords ? words[nwords - 1] : "<none>"); + return -EINVAL; + } } /* `buf' is start of word, `end' is one past its end */ @@ -640,10 +644,9 @@ static int param_set_dyndbg_classnames(const char *instr, const struct kernel_pa int cls_id, totct = 0; bool wanted; - cl_str = tmp = kstrdup(instr, GFP_KERNEL); - p = strchr(cl_str, '\n'); - if (p) - *p = '\0'; + cl_str = tmp = kstrdup_and_replace(instr, '\n', '\0', GFP_KERNEL); + if (!tmp) + return -ENOMEM; /* start with previously set state-bits, then modify */ curr_bits = old_bits = *dcp->bits; diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index fde0aa244148..e49deddd3de9 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -10,18 +10,90 @@ #include <linux/dynamic_queue_limits.h> #include <linux/compiler.h> #include <linux/export.h> +#include <trace/events/napi.h> #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) +static void dql_check_stall(struct dql *dql, unsigned short stall_thrs) +{ + unsigned long now; + + if (!stall_thrs) + return; + + now = jiffies; + /* Check for a potential stall */ + if (time_after_eq(now, dql->last_reap + stall_thrs)) { + unsigned long hist_head, t, start, end; + + /* We are trying to detect a period of at least @stall_thrs + * jiffies without any Tx completions, but during first half + * of which some Tx was posted. + */ +dqs_again: + hist_head = READ_ONCE(dql->history_head); + /* pairs with smp_wmb() in dql_queued() */ + smp_rmb(); + + /* Get the previous entry in the ring buffer, which is the + * oldest sample. 
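The stall check in dql_check_stall() only consumes this history ring; the bits are recorded on the queueing side by dql_queued() in include/linux/dynamic_queue_limits.h, which is not part of this hunk. A simplified sketch of that producer side (illustrative only, not the header's exact code; DQL_HIST_LEN is the ring size in longs):

static inline void dql_record_tx_time(struct dql *dql)
{
	/* Illustration only; the authoritative version is dql_queued(). */
	unsigned long now = jiffies;
	unsigned long head = now / BITS_PER_LONG;

	if (head != READ_ONCE(dql->history_head)) {
		/* A new BITS_PER_LONG-jiffies slot starts: clear the slot
		 * being reused, then publish the new head. The smp_wmb()
		 * pairs with the smp_rmb() in dql_check_stall().
		 */
		dql->history[head % DQL_HIST_LEN] = 0;
		smp_wmb();
		WRITE_ONCE(dql->history_head, head);
	}
	/* Remember that Tx was posted at jiffy 'now'. */
	__set_bit(now % (DQL_HIST_LEN * BITS_PER_LONG), dql->history);
}

Note that dql_check_stall() returns immediately while dql->stall_thrs is zero, and dql_init() below leaves it at zero, so the detector stays inert until a caller configures a threshold.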
+ */ + start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG; + + /* Advance start to continue from the last reap time */ + if (time_before(start, dql->last_reap + 1)) + start = dql->last_reap + 1; + + /* Newest sample we should have already seen a completion for */ + end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1); + + /* Shrink the search space to [start, (now - stall_thrs/2)] if + * `end` is beyond the stall zone + */ + if (time_before(now, end + stall_thrs / 2)) + end = now - stall_thrs / 2; + + /* Search for the queued time in [start, end] */ + for (t = start; time_before_eq(t, end); t++) + if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG), + dql->history)) + break; + + /* If a bit was found, t holds the time the stalled Tx was queued */ + if (!time_before_eq(t, end)) + goto no_stall; + + /* The ring buffer was modified in the meantime, retry */ + if (hist_head != READ_ONCE(dql->history_head)) + goto dqs_again; + + dql->stall_cnt++; + dql->stall_max = max_t(unsigned short, dql->stall_max, now - t); + + trace_dql_stall_detected(dql->stall_thrs, now - t, + dql->last_reap, dql->history_head, + now, dql->history); + } +no_stall: + dql->last_reap = now; +} + /* Records completed count and recalculates the queue limit */ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; unsigned int ovlimit, completed, num_queued; + unsigned short stall_thrs; bool all_prev_completed; num_queued = READ_ONCE(dql->num_queued); + /* Read stall_thrs in advance since it belongs to the same (first) + * cache line as ->num_queued. This way, dql_check_stall() does not + * need to touch the first cache line again later, reducing the window + * of possible false sharing. + */ + stall_thrs = READ_ONCE(dql->stall_thrs); /* Can't complete more than what's in queue */ BUG_ON(count > num_queued - dql->num_completed); @@ -110,6 +182,8 @@ void dql_completed(struct dql *dql, unsigned int count) dql->prev_last_obj_cnt = dql->last_obj_cnt; dql->num_completed = completed; dql->prev_num_queued = num_queued; + + dql_check_stall(dql, stall_thrs); } EXPORT_SYMBOL(dql_completed); @@ -125,6 +199,10 @@ void dql_reset(struct dql *dql) dql->prev_ovlimit = 0; dql->lowest_slack = UINT_MAX; dql->slack_start_time = jiffies; + + dql->last_reap = jiffies; + dql->history_head = jiffies / BITS_PER_LONG; + memset(dql->history, 0, sizeof(dql->history)); } EXPORT_SYMBOL(dql_reset); @@ -133,6 +211,7 @@ void dql_init(struct dql *dql, unsigned int hold_time) dql->max_limit = DQL_MAX_LIMIT; dql->min_limit = 0; dql->slack_hold_time = hold_time; + dql->stall_thrs = 0; dql_reset(dql); } EXPORT_SYMBOL(dql_init); diff --git a/lib/errname.c b/lib/errname.c index dd1b998552cd..4f9112b38f3a 100644 --- a/lib/errname.c +++ b/lib/errname.c @@ -111,9 +111,6 @@ static const char *names_0[] = { E(ENOSPC), E(ENOSR), E(ENOSTR), -#ifdef ENOSYM - E(ENOSYM), -#endif E(ENOSYS), E(ENOTBLK), E(ENOTCONN), @@ -144,9 +141,6 @@ static const char *names_0[] = { #endif E(EREMOTE), E(EREMOTEIO), -#ifdef EREMOTERELEASE - E(EREMOTERELEASE), -#endif E(ERESTART), E(ERFKILL), E(EROFS), diff --git a/lib/find_bit.c b/lib/find_bit.c index 32f99e9a670e..dacadd904250 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -116,6 +116,18 @@ unsigned long _find_first_and_bit(const unsigned long *addr1, EXPORT_SYMBOL(_find_first_and_bit); #endif +/* + * Find the first set bit in three memory regions. 
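This is the out-of-line worker for the new three-mask search; callers are expected to go through a static inline wrapper, analogous to the existing find_first_and_bit(). A minimal usage sketch, assuming the wrapper is named find_first_and_and_bit() with the same argument order:

/* Illustrative only: pick the first CPU that is requested, online and
 * allowed, without allocating a temporary mask for the intersection.
 */
static unsigned int pick_first_usable_cpu(const struct cpumask *requested,
					  const struct cpumask *allowed)
{
	unsigned int cpu;

	cpu = find_first_and_and_bit(cpumask_bits(requested),
				     cpumask_bits(cpu_online_mask),
				     cpumask_bits(allowed),
				     nr_cpu_ids);
	return cpu;	/* >= nr_cpu_ids if the intersection is empty */
}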
+ */ +unsigned long _find_first_and_and_bit(const unsigned long *addr1, + const unsigned long *addr2, + const unsigned long *addr3, + unsigned long size) +{ + return FIND_FIRST_BIT(addr1[idx] & addr2[idx] & addr3[idx], /* nop */, size); +} +EXPORT_SYMBOL(_find_first_and_and_bit); + #ifndef find_first_zero_bit /* * Find the first cleared bit in a memory region. diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index 83332fefa6f4..84ecccddc771 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -84,83 +84,6 @@ bool fprop_new_period(struct fprop_global *p, int periods) } /* - * ---- SINGLE ---- - */ - -int fprop_local_init_single(struct fprop_local_single *pl) -{ - pl->events = 0; - pl->period = 0; - raw_spin_lock_init(&pl->lock); - return 0; -} - -void fprop_local_destroy_single(struct fprop_local_single *pl) -{ -} - -static void fprop_reflect_period_single(struct fprop_global *p, - struct fprop_local_single *pl) -{ - unsigned int period = p->period; - unsigned long flags; - - /* Fast path - period didn't change */ - if (pl->period == period) - return; - raw_spin_lock_irqsave(&pl->lock, flags); - /* Someone updated pl->period while we were spinning? */ - if (pl->period >= period) { - raw_spin_unlock_irqrestore(&pl->lock, flags); - return; - } - /* Aging zeroed our fraction? */ - if (period - pl->period < BITS_PER_LONG) - pl->events >>= period - pl->period; - else - pl->events = 0; - pl->period = period; - raw_spin_unlock_irqrestore(&pl->lock, flags); -} - -/* Event of type pl happened */ -void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) -{ - fprop_reflect_period_single(p, pl); - pl->events++; - percpu_counter_add(&p->events, 1); -} - -/* Return fraction of events of type pl */ -void fprop_fraction_single(struct fprop_global *p, - struct fprop_local_single *pl, - unsigned long *numerator, unsigned long *denominator) -{ - unsigned int seq; - s64 num, den; - - do { - seq = read_seqcount_begin(&p->sequence); - fprop_reflect_period_single(p, pl); - num = pl->events; - den = percpu_counter_read_positive(&p->events); - } while (read_seqcount_retry(&p->sequence, seq)); - - /* - * Make fraction <= 1 and denominator > 0 even in presence of percpu - * counter errors - */ - if (den <= num) { - if (num) - den = num; - else - den = 1; - } - *denominator = den; - *numerator = num; -} - -/* * ---- PERCPU ---- */ #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids))) diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig index c035fde66aeb..7e945fdcbf11 100644 --- a/lib/fonts/Kconfig +++ b/lib/fonts/Kconfig @@ -98,7 +98,8 @@ config FONT_10x18 config FONT_SUN8x16 bool "Sparc console 8x16 font" - depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC) + depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || \ + BOOTX_TEXT || EARLYFB help This is the high resolution console font for Sun machines. Say Y. diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c index 973866438608..47e34950b665 100644 --- a/lib/fonts/fonts.c +++ b/lib/fonts/fonts.c @@ -96,18 +96,21 @@ EXPORT_SYMBOL(find_font); * get_default_font - get default font * @xres: screen size of X * @yres: screen size of Y - * @font_w: bit array of supported widths (1 - 32) - * @font_h: bit array of supported heights (1 - 32) + * @font_w: bit array of supported widths (1 - FB_MAX_BLIT_WIDTH) + * @font_h: bit array of supported heights (1 - FB_MAX_BLIT_HEIGHT) * * Get the default font for a specified screen size. * Dimensions are in pixels. * + * font_w or font_h being NULL means all values are supported. 
+ * * Returns %NULL if no font is found, or a pointer to the * chosen font. * */ -const struct font_desc *get_default_font(int xres, int yres, u32 font_w, - u32 font_h) +const struct font_desc *get_default_font(int xres, int yres, + unsigned long *font_w, + unsigned long *font_h) { int i, c, cc, res; const struct font_desc *f, *g; @@ -135,8 +138,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w, if (res > 20) c += 20 - res; - if ((font_w & (1U << (f->width - 1))) && - (font_h & (1U << (f->height - 1)))) + if ((!font_w || test_bit(f->width - 1, font_w)) && + (!font_h || test_bit(f->height - 1, font_h))) c += 1000; if (c > cc) { diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index c8c33cbaae9e..d2377e00caab 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to - * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c) + * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy() + * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c). * * For corner cases with UBSAN, try testing with: * @@ -15,17 +15,73 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/* We don't need to fill dmesg with the fortify WARNs during testing. */ +#ifdef DEBUG +# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x) +# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x) +#else +# define FORTIFY_REPORT_KUNIT(x...) do { } while (0) +# define FORTIFY_WARN_KUNIT(x...) do { } while (0) +#endif + +/* Redefine fortify_panic() to track failures. */ +void fortify_add_kunit_error(int write); +#define fortify_panic(func, write, avail, size, retfail) do { \ + FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \ + fortify_add_kunit_error(write); \ + return (retfail); \ +} while (0) + +/* Redefine fortify_warn_once() to track memcpy() failures. */ +#define fortify_warn_once(chk_func, x...) do { \ + bool __result = chk_func; \ + FORTIFY_WARN_KUNIT(__result, x); \ + if (__result) \ + fortify_add_kunit_error(1); \ +} while (0) + +#include <kunit/device.h> #include <kunit/test.h> +#include <kunit/test-bug.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/vmalloc.h> +/* Handle being built without CONFIG_FORTIFY_SOURCE */ +#ifndef __compiletime_strlen +# define __compiletime_strlen __builtin_strlen +#endif + +static struct kunit_resource read_resource; +static struct kunit_resource write_resource; +static int fortify_read_overflows; +static int fortify_write_overflows; + static const char array_of_10[] = "this is 10"; static const char *ptr_of_11 = "this is 11!"; static char array_unknown[] = "compiler thinks I might change"; -static void known_sizes_test(struct kunit *test) +void fortify_add_kunit_error(int write) +{ + struct kunit_resource *resource; + struct kunit *current_test; + + current_test = kunit_get_current_test(); + if (!current_test) + return; + + resource = kunit_find_named_resource(current_test, + write ? 
"fortify_write_overflows" + : "fortify_read_overflows"); + if (!resource) + return; + + (*(int *)resource->data)++; + kunit_put_resource(resource); +} + +static void fortify_test_known_sizes(struct kunit *test) { KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8); KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10); @@ -58,7 +114,7 @@ static noinline size_t want_minus_one(int pick) return __compiletime_strlen(str); } -static void control_flow_split_test(struct kunit *test) +static void fortify_test_control_flow_split(struct kunit *test) { KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX); } @@ -134,11 +190,11 @@ static volatile size_t unknown_size = 50; #endif #define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \ -static void alloc_size_##allocator##_const_test(struct kunit *test) \ +static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \ { \ CONST_TEST_BODY(TEST_##allocator); \ } \ -static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \ +static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \ { \ DYNAMIC_TEST_BODY(TEST_##allocator); \ } @@ -228,28 +284,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc) \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \ - vfree(p)); \ + kvfree(p)); \ \ prev_size = (expected_pages) * PAGE_SIZE; \ orig = kvmalloc(prev_size, gfp); \ @@ -269,7 +325,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc) size_t len; \ \ /* Create dummy device for devm_kmalloc()-family tests. 
*/ \ - dev = root_device_register(dev_name); \ + dev = kunit_device_register(test, dev_name); \ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \ "Cannot register test device\n"); \ \ @@ -303,26 +359,741 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc) checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \ devm_kfree(dev, p)); \ \ - device_unregister(dev); \ + kunit_device_unregister(test, dev); \ } while (0) DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc) +static const char * const test_strs[] = { + "", + "Hello there", + "A longer string, just for variety", +}; + +#define TEST_realloc(checker) do { \ + gfp_t gfp = GFP_KERNEL; \ + size_t len; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \ + len = strlen(test_strs[i]); \ + KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \ + checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \ + kfree(p)); \ + checker(len, kmemdup(test_strs[i], len, gfp), \ + kfree(p)); \ + } \ +} while (0) +static void fortify_test_realloc_size(struct kunit *test) +{ + TEST_realloc(check_dynamic); +} + +/* + * We can't have an array at the end of a structure or else + * builds without -fstrict-flex-arrays=3 will report them as + * being an unknown length. Additionally, add bytes before + * and after the string to catch over/underflows if tests + * fail. + */ +struct fortify_padding { + unsigned long bytes_before; + char buf[32]; + unsigned long bytes_after; +}; +/* Force compiler into not being able to resolve size at compile-time. */ +static volatile int unconst; + +static void fortify_test_strlen(struct kunit *test) +{ + struct fortify_padding pad = { }; + int i, end = sizeof(pad.buf) - 1; + + /* Fill 31 bytes with valid characters. */ + for (i = 0; i < sizeof(pad.buf) - 1; i++) + pad.buf[i] = i + '0'; + /* Trailing bytes are still %NUL. */ + KUNIT_EXPECT_EQ(test, pad.buf[end], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* String is terminated, so strlen() is valid. */ + KUNIT_EXPECT_EQ(test, strlen(pad.buf), end); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + + /* Make string unterminated, and recount. */ + pad.buf[end] = 'A'; + end = sizeof(pad.buf); + KUNIT_EXPECT_EQ(test, strlen(pad.buf), end); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); +} + +static void fortify_test_strnlen(struct kunit *test) +{ + struct fortify_padding pad = { }; + int i, end = sizeof(pad.buf) - 1; + + /* Fill 31 bytes with valid characters. */ + for (i = 0; i < sizeof(pad.buf) - 1; i++) + pad.buf[i] = i + '0'; + /* Trailing bytes are still %NUL. */ + KUNIT_EXPECT_EQ(test, pad.buf[end], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* String is terminated, so strnlen() is valid. */ + KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + /* A truncated strnlen() will be safe, too. */ + KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2), + sizeof(pad.buf) / 2); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + + /* Make string unterminated, and recount. */ + pad.buf[end] = 'A'; + end = sizeof(pad.buf); + /* Reading beyond with strncpy() will fail. */ + KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); + + /* Early-truncated is safe still, though. 
*/ + KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); + + end = sizeof(pad.buf) / 2; + KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); +} + +static void fortify_test_strcpy(struct kunit *test) +{ + struct fortify_padding pad = { }; + char src[sizeof(pad.buf) + 1] = { }; + int i; + + /* Fill 31 bytes with valid characters. */ + for (i = 0; i < sizeof(src) - 2; i++) + src[i] = i + '0'; + + /* Destination is %NUL-filled to start with. */ + KUNIT_EXPECT_EQ(test, pad.bytes_before, 0); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Legitimate strcpy() 1 less than of max size. */ + KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src) + == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Only last byte should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + src[sizeof(src) - 2] = 'A'; + /* But now we trip the overflow checking. */ + KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src) + == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); + /* Trailing %NUL -- thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + /* And we will not have gone beyond. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + src[sizeof(src) - 1] = 'A'; + /* And for sure now, two bytes past. */ + KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src) + == pad.buf); + /* + * Which trips both the strlen() on the unterminated src, + * and the resulting copy attempt. + */ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); + /* Trailing %NUL -- thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + /* And we will not have gone beyond. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); +} + +static void fortify_test_strncpy(struct kunit *test) +{ + struct fortify_padding pad = { }; + char src[] = "Copy me fully into a small buffer and I will overflow!"; + + /* Destination is %NUL-filled to start with. */ + KUNIT_EXPECT_EQ(test, pad.bytes_before, 0); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Legitimate strncpy() 1 less than of max size. */ + KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, + sizeof(pad.buf) + unconst - 1) + == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Only last byte should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* Legitimate (though unterminated) max-size strncpy. 
*/ + KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, + sizeof(pad.buf) + unconst) + == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* No trailing %NUL -- thanks strncpy API. */ + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + /* But we will not have gone beyond. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Now verify that FORTIFY is working... */ + KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, + sizeof(pad.buf) + unconst + 1) + == pad.buf); + /* Should catch the overflow. */ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + /* And we will not have gone beyond. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* And further... */ + KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, + sizeof(pad.buf) + unconst + 2) + == pad.buf); + /* Should catch the overflow. */ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + /* And we will not have gone beyond. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); +} + +static void fortify_test_strscpy(struct kunit *test) +{ + struct fortify_padding pad = { }; + char src[] = "Copy me fully into a small buffer and I will overflow!"; + + /* Destination is %NUL-filled to start with. */ + KUNIT_EXPECT_EQ(test, pad.bytes_before, 0); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Legitimate strscpy() 1 less than of max size. */ + KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, + sizeof(pad.buf) + unconst - 1), + -E2BIG); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Keeping space for %NUL, last two bytes should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* Legitimate max-size strscpy. */ + KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, + sizeof(pad.buf) + unconst), + -E2BIG); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* A trailing %NUL will exist. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + + /* Now verify that FORTIFY is working... */ + KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, + sizeof(pad.buf) + unconst + 1), + -E2BIG); + /* Should catch the overflow. */ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + /* And we will not have gone beyond. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* And much further... */ + KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, + sizeof(src) * 2 + unconst), + -E2BIG); + /* Should catch the overflow. 
*/ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + /* And we will not have gone beyond. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); +} + +static void fortify_test_strcat(struct kunit *test) +{ + struct fortify_padding pad = { }; + char src[sizeof(pad.buf) / 2] = { }; + char one[] = "A"; + char two[] = "BC"; + int i; + + /* Fill 15 bytes with valid characters. */ + for (i = 0; i < sizeof(src) - 1; i++) + src[i] = i + 'A'; + + /* Destination is %NUL-filled to start with. */ + KUNIT_EXPECT_EQ(test, pad.bytes_before, 0); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Legitimate strcat() using less than half max size. */ + KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Legitimate strcat() now 2 bytes shy of end. */ + KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Last two bytes should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* Add one more character to the end. */ + KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Last byte should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* And this one char will overflow. */ + KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); + /* Last byte should be %NUL thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* And adding two will overflow more. */ + KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); + /* Last byte should be %NUL thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); +} + +static void fortify_test_strncat(struct kunit *test) +{ + struct fortify_padding pad = { }; + char src[sizeof(pad.buf)] = { }; + int i, partial; + + /* Fill 31 bytes with valid characters. */ + partial = sizeof(src) / 2 - 1; + for (i = 0; i < partial; i++) + src[i] = i + 'A'; + + /* Destination is %NUL-filled to start with. */ + KUNIT_EXPECT_EQ(test, pad.bytes_before, 0); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Legitimate strncat() using less than half max size. 
*/ + KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Legitimate strncat() now 2 bytes shy of end. */ + KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Last two bytes should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* Add one more character to the end. */ + KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Last byte should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* And this one char will overflow. */ + KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); + /* Last byte should be %NUL thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* And adding two will overflow more. */ + KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); + /* Last byte should be %NUL thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Force an unterminated destination, and overflow. */ + pad.buf[sizeof(pad.buf) - 1] = 'A'; + KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf); + /* This will have tripped both strlen() and strcat(). */ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + /* But we should not go beyond the end. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); +} + +static void fortify_test_strlcat(struct kunit *test) +{ + struct fortify_padding pad = { }; + char src[sizeof(pad.buf)] = { }; + int i, partial; + int len = sizeof(pad.buf) + unconst; + + /* Fill 15 bytes with valid characters. */ + partial = sizeof(src) / 2 - 1; + for (i = 0; i < partial; i++) + src[i] = i + 'A'; + + /* Destination is %NUL-filled to start with. */ + KUNIT_EXPECT_EQ(test, pad.bytes_before, 0); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Legitimate strlcat() using less than half max size. 
*/ + KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Legitimate strlcat() now 2 bytes shy of end. */ + KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Last two bytes should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* Add one more character to the end. */ + KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); + /* Last byte should be %NUL */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + + /* And this one char will overflow. */ + KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); + /* Last byte should be %NUL thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* And adding two will overflow more. */ + KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); + /* Last byte should be %NUL thanks to FORTIFY. */ + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Force an unterminated destination, and overflow. */ + pad.buf[sizeof(pad.buf) - 1] = 'A'; + KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2); + /* This will have tripped both strlen() and strlcat(). */ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0'); + KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0'); + /* But we should not go beyond the end. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); + + /* Force an unterminated source, and overflow. */ + memset(src, 'B', sizeof(src)); + pad.buf[sizeof(pad.buf) - 1] = '\0'; + KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src)); + /* This will have tripped both strlen() and strlcat(). */ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3); + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3); + KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); + /* But we should not go beyond the end. */ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); +} + +/* Check for 0-sized arrays... 
*/ +struct fortify_zero_sized { + unsigned long bytes_before; + char buf[0]; + unsigned long bytes_after; +}; + +#define __fortify_test(memfunc) \ +static void fortify_test_##memfunc(struct kunit *test) \ +{ \ + struct fortify_zero_sized zero = { }; \ + struct fortify_padding pad = { }; \ + char srcA[sizeof(pad.buf) + 2]; \ + char srcB[sizeof(pad.buf) + 2]; \ + size_t len = sizeof(pad.buf) + unconst; \ + \ + memset(srcA, 'A', sizeof(srcA)); \ + KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \ + memset(srcB, 'B', sizeof(srcB)); \ + KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \ + \ + memfunc(pad.buf, srcA, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf + 1, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len - 1); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len + 1); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \ + memfunc(pad.buf + 1, srcB, len); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \ + \ + /* Reset error counter. */ \ + fortify_write_overflows = 0; \ + /* Copy nothing into nothing: no errors. */ \ + memfunc(zero.buf, srcB, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + /* We currently explicitly ignore zero-sized dests. */ \ + memfunc(zero.buf, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ +} +__fortify_test(memcpy) +__fortify_test(memmove) + +static void fortify_test_memscan(struct kunit *test) +{ + char haystack[] = "Where oh where is my memory range?"; + char *mem = haystack + strlen("Where oh where is "); + char needle = 'm'; + size_t len = sizeof(haystack) + unconst; + + KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len), + mem); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + /* Catch too-large range. 
*/ + KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1), + NULL); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2), + NULL); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); +} + +static void fortify_test_memchr(struct kunit *test) +{ + char haystack[] = "Where oh where is my memory range?"; + char *mem = haystack + strlen("Where oh where is "); + char needle = 'm'; + size_t len = sizeof(haystack) + unconst; + + KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len), + mem); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + /* Catch too-large range. */ + KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1), + NULL); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2), + NULL); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); +} + +static void fortify_test_memchr_inv(struct kunit *test) +{ + char haystack[] = "Where oh where is my memory range?"; + char *mem = haystack + 1; + char needle = 'W'; + size_t len = sizeof(haystack) + unconst; + + /* Normal search is okay. */ + KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len), + mem); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + /* Catch too-large range. */ + KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1), + NULL); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2), + NULL); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); +} + +static void fortify_test_memcmp(struct kunit *test) +{ + char one[] = "My mind is going ..."; + char two[] = "My mind is going ... I can feel it."; + size_t one_len = sizeof(one) + unconst - 1; + size_t two_len = sizeof(two) + unconst - 1; + + /* We match the first string (ignoring the %NUL). */ + KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + /* Still in bounds, but no longer matching. */ + KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 1), -32); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + + /* Catch too-large ranges. */ + KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + + KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); +} + +static void fortify_test_kmemdup(struct kunit *test) +{ + char src[] = "I got Doom running on it!"; + char *copy; + size_t len = sizeof(src) + unconst; + + /* Copy is within bounds. */ + copy = kmemdup(src, len, GFP_KERNEL); + KUNIT_EXPECT_NOT_NULL(test, copy); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + kfree(copy); + + /* Without %NUL. */ + copy = kmemdup(src, len - 1, GFP_KERNEL); + KUNIT_EXPECT_NOT_NULL(test, copy); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + kfree(copy); + + /* Tiny bounds. */ + copy = kmemdup(src, 1, GFP_KERNEL); + KUNIT_EXPECT_NOT_NULL(test, copy); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); + kfree(copy); + + /* Out of bounds by 1 byte. */ + copy = kmemdup(src, len + 1, GFP_KERNEL); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); + kfree(copy); + + /* Way out of bounds. */ + copy = kmemdup(src, len * 2, GFP_KERNEL); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); + kfree(copy); + + /* Starting offset causing out of bounds. 
*/ + copy = kmemdup(src + 1, len, GFP_KERNEL); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3); + kfree(copy); +} + +static int fortify_test_init(struct kunit *test) +{ + if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE)) + kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y"); + + fortify_read_overflows = 0; + kunit_add_named_resource(test, NULL, NULL, &read_resource, + "fortify_read_overflows", + &fortify_read_overflows); + fortify_write_overflows = 0; + kunit_add_named_resource(test, NULL, NULL, &write_resource, + "fortify_write_overflows", + &fortify_write_overflows); + return 0; +} + static struct kunit_case fortify_test_cases[] = { - KUNIT_CASE(known_sizes_test), - KUNIT_CASE(control_flow_split_test), - KUNIT_CASE(alloc_size_kmalloc_const_test), - KUNIT_CASE(alloc_size_kmalloc_dynamic_test), - KUNIT_CASE(alloc_size_vmalloc_const_test), - KUNIT_CASE(alloc_size_vmalloc_dynamic_test), - KUNIT_CASE(alloc_size_kvmalloc_const_test), - KUNIT_CASE(alloc_size_kvmalloc_dynamic_test), - KUNIT_CASE(alloc_size_devm_kmalloc_const_test), - KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test), + KUNIT_CASE(fortify_test_known_sizes), + KUNIT_CASE(fortify_test_control_flow_split), + KUNIT_CASE(fortify_test_alloc_size_kmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_vmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic), + KUNIT_CASE(fortify_test_realloc_size), + KUNIT_CASE(fortify_test_strlen), + KUNIT_CASE(fortify_test_strnlen), + KUNIT_CASE(fortify_test_strcpy), + KUNIT_CASE(fortify_test_strncpy), + KUNIT_CASE(fortify_test_strscpy), + KUNIT_CASE(fortify_test_strcat), + KUNIT_CASE(fortify_test_strncat), + KUNIT_CASE(fortify_test_strlcat), + /* skip memset: performs bounds checking on whole structs */ + KUNIT_CASE(fortify_test_memcpy), + KUNIT_CASE(fortify_test_memmove), + KUNIT_CASE(fortify_test_memscan), + KUNIT_CASE(fortify_test_memchr), + KUNIT_CASE(fortify_test_memchr_inv), + KUNIT_CASE(fortify_test_memcmp), + KUNIT_CASE(fortify_test_kmemdup), {} }; static struct kunit_suite fortify_test_suite = { .name = "fortify", + .init = fortify_test_init, .test_cases = fortify_test_cases, }; diff --git a/lib/fw_table.c b/lib/fw_table.c new file mode 100644 index 000000000000..16291814450e --- /dev/null +++ b/lib/fw_table.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * fw_tables.c - Parsing support for ACPI and ACPI-like tables provided by + * platform or device firmware + * + * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2023 Intel Corp. 
+ */ +#include <linux/errno.h> +#include <linux/acpi.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/fw_table.h> + +enum acpi_subtable_type { + ACPI_SUBTABLE_COMMON, + ACPI_SUBTABLE_HMAT, + ACPI_SUBTABLE_PRMT, + ACPI_SUBTABLE_CEDT, + CDAT_SUBTABLE, +}; + +struct acpi_subtable_entry { + union acpi_subtable_headers *hdr; + enum acpi_subtable_type type; +}; + +static unsigned long __init_or_fwtbl_lib +acpi_get_entry_type(struct acpi_subtable_entry *entry) +{ + switch (entry->type) { + case ACPI_SUBTABLE_COMMON: + return entry->hdr->common.type; + case ACPI_SUBTABLE_HMAT: + return entry->hdr->hmat.type; + case ACPI_SUBTABLE_PRMT: + return 0; + case ACPI_SUBTABLE_CEDT: + return entry->hdr->cedt.type; + case CDAT_SUBTABLE: + return entry->hdr->cdat.type; + } + return 0; +} + +static unsigned long __init_or_fwtbl_lib +acpi_get_entry_length(struct acpi_subtable_entry *entry) +{ + switch (entry->type) { + case ACPI_SUBTABLE_COMMON: + return entry->hdr->common.length; + case ACPI_SUBTABLE_HMAT: + return entry->hdr->hmat.length; + case ACPI_SUBTABLE_PRMT: + return entry->hdr->prmt.length; + case ACPI_SUBTABLE_CEDT: + return entry->hdr->cedt.length; + case CDAT_SUBTABLE: { + __le16 length = (__force __le16)entry->hdr->cdat.length; + + return le16_to_cpu(length); + } + } + return 0; +} + +static unsigned long __init_or_fwtbl_lib +acpi_get_subtable_header_length(struct acpi_subtable_entry *entry) +{ + switch (entry->type) { + case ACPI_SUBTABLE_COMMON: + return sizeof(entry->hdr->common); + case ACPI_SUBTABLE_HMAT: + return sizeof(entry->hdr->hmat); + case ACPI_SUBTABLE_PRMT: + return sizeof(entry->hdr->prmt); + case ACPI_SUBTABLE_CEDT: + return sizeof(entry->hdr->cedt); + case CDAT_SUBTABLE: + return sizeof(entry->hdr->cdat); + } + return 0; +} + +static enum acpi_subtable_type __init_or_fwtbl_lib +acpi_get_subtable_type(char *id) +{ + if (strncmp(id, ACPI_SIG_HMAT, 4) == 0) + return ACPI_SUBTABLE_HMAT; + if (strncmp(id, ACPI_SIG_PRMT, 4) == 0) + return ACPI_SUBTABLE_PRMT; + if (strncmp(id, ACPI_SIG_CEDT, 4) == 0) + return ACPI_SUBTABLE_CEDT; + if (strncmp(id, ACPI_SIG_CDAT, 4) == 0) + return CDAT_SUBTABLE; + return ACPI_SUBTABLE_COMMON; +} + +static unsigned long __init_or_fwtbl_lib +acpi_table_get_length(enum acpi_subtable_type type, + union fw_table_header *header) +{ + if (type == CDAT_SUBTABLE) { + __le32 length = (__force __le32)header->cdat.length; + + return le32_to_cpu(length); + } + + return header->acpi.length; +} + +static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc, + union acpi_subtable_headers *hdr, + unsigned long end) +{ + if (proc->handler) + return proc->handler(hdr, end); + if (proc->handler_arg) + return proc->handler_arg(hdr, proc->arg, end); + return -EINVAL; +} + +/** + * acpi_parse_entries_array - for each proc_num find a suitable subtable + * + * @id: table id (for debugging purposes) + * @table_size: size of the root table + * @max_length: maximum size of the table (ignore if 0) + * @table_header: where does the table start? + * @proc: array of acpi_subtable_proc struct containing entry id + * and associated handler with it + * @proc_num: how big proc is? + * @max_entries: how many entries can we process? + * + * For each proc_num find a subtable with proc->id and run proc->handler + * on it. Assumption is that there's only single handler for particular + * entry id. 
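For illustration (not part of the patch): the kernel-doc above describes the generic subtable walker that fw_table.c exports. Below is a minimal sketch of how a caller might use the cdat_table_parse() wrapper added at the end of this file; the handler, counter and function names are hypothetical, and only the cdat_table_parse()/acpi_subtable_proc API is taken from the code in this diff.

#include <linux/acpi.h>
#include <linux/fw_table.h>

/* Hypothetical handler: count DSMAS subtables in a CDAT table. */
static int count_dsmas(union acpi_subtable_headers *hdr, void *arg,
		       const unsigned long end)
{
	int *count = arg;

	(*count)++;
	return 0;	/* non-zero would abort the walk with -EINVAL */
}

/* Hypothetical caller: walk a CDAT table of 'len' bytes. */
static int count_cdat_dsmas(struct acpi_table_cdat *cdat, unsigned long len)
{
	int count = 0;
	int ret;

	ret = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, count_dsmas, &count,
			       cdat, len);
	return ret < 0 ? ret : count;
}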
+ * + * The table_size is not the size of the complete ACPI table (the length + * field in the header struct), but only the size of the root table; i.e., + * the offset from the very first byte of the complete ACPI table, to the + * first byte of the very first subtable. + * + * On success returns sum of all matching entries for all proc handlers. + * Otherwise, -ENODEV or -EINVAL is returned. + */ +int __init_or_fwtbl_lib +acpi_parse_entries_array(char *id, unsigned long table_size, + union fw_table_header *table_header, + unsigned long max_length, + struct acpi_subtable_proc *proc, + int proc_num, unsigned int max_entries) +{ + unsigned long table_len, table_end, subtable_len, entry_len; + struct acpi_subtable_entry entry; + enum acpi_subtable_type type; + int count = 0; + int i; + + type = acpi_get_subtable_type(id); + table_len = acpi_table_get_length(type, table_header); + if (max_length && max_length < table_len) + table_len = max_length; + table_end = (unsigned long)table_header + table_len; + + /* Parse all entries looking for a match. */ + + entry.type = type; + entry.hdr = (union acpi_subtable_headers *) + ((unsigned long)table_header + table_size); + subtable_len = acpi_get_subtable_header_length(&entry); + + while (((unsigned long)entry.hdr) + subtable_len < table_end) { + for (i = 0; i < proc_num; i++) { + if (acpi_get_entry_type(&entry) != proc[i].id) + continue; + + if (!max_entries || count < max_entries) + if (call_handler(&proc[i], entry.hdr, table_end)) + return -EINVAL; + + proc[i].count++; + count++; + break; + } + + /* + * If entry->length is 0, break from this loop to avoid + * infinite loop. + */ + entry_len = acpi_get_entry_length(&entry); + if (entry_len == 0) { + pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id); + return -EINVAL; + } + + entry.hdr = (union acpi_subtable_headers *) + ((unsigned long)entry.hdr + entry_len); + } + + if (max_entries && count > max_entries) { + pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n", + id, proc->id, count - max_entries, count); + } + + return count; +} + +int __init_or_fwtbl_lib +cdat_table_parse(enum acpi_cdat_type type, + acpi_tbl_entry_handler_arg handler_arg, + void *arg, + struct acpi_table_cdat *table_header, + unsigned long length) +{ + struct acpi_subtable_proc proc = { + .id = type, + .handler_arg = handler_arg, + .arg = arg, + }; + + if (!table_header) + return -EINVAL; + + return acpi_parse_entries_array(ACPI_SIG_CDAT, + sizeof(struct acpi_table_cdat), + (union fw_table_header *)table_header, + length, &proc, 1, 0); +} +EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse); diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c index 41f1bcdc4488..aaefb9b678c8 100644 --- a/lib/generic-radix-tree.c +++ b/lib/generic-radix-tree.c @@ -5,7 +5,7 @@ #include <linux/gfp.h> #include <linux/kmemleak.h> -#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *)) +#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *)) #define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY) struct genradix_node { @@ -14,13 +14,13 @@ struct genradix_node { struct genradix_node *children[GENRADIX_ARY]; /* Leaf: */ - u8 data[PAGE_SIZE]; + u8 data[GENRADIX_NODE_SIZE]; }; }; static inline int genradix_depth_shift(unsigned depth) { - return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth; + return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth; } /* @@ -33,7 +33,7 @@ static inline size_t genradix_depth_size(unsigned depth) /* depth that's needed for a genradix that can address up to ULONG_MAX: */ #define 
GENRADIX_MAX_DEPTH \ - DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT) + DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT) #define GENRADIX_DEPTH_MASK \ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1)) @@ -79,23 +79,12 @@ EXPORT_SYMBOL(__genradix_ptr); static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) { - struct genradix_node *node; - - node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO); - - /* - * We're using pages (not slab allocations) directly for kernel data - * structures, so we need to explicitly inform kmemleak of them in order - * to avoid false positive memory leak reports. - */ - kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask); - return node; + return kzalloc(GENRADIX_NODE_SIZE, gfp_mask); } static inline void genradix_free_node(struct genradix_node *node) { - kmemleak_free(node); - free_page((unsigned long)node); + kfree(node); } /* @@ -200,7 +189,7 @@ restart: i++; iter->offset = round_down(iter->offset + objs_per_ptr, objs_per_ptr); - iter->pos = (iter->offset >> PAGE_SHIFT) * + iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page; if (i == GENRADIX_ARY) goto restart; @@ -209,7 +198,7 @@ restart: n = n->children[i]; } - return &n->data[iter->offset & (PAGE_SIZE - 1)]; + return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)]; } EXPORT_SYMBOL(__genradix_iter_peek); @@ -235,7 +224,7 @@ restart: if (ilog2(iter->offset) >= genradix_depth_shift(level)) { iter->offset = genradix_depth_size(level); - iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; + iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page; iter->offset -= obj_size_plus_page_remainder; iter->pos--; @@ -251,7 +240,7 @@ restart: size_t objs_per_ptr = genradix_depth_size(level); iter->offset = round_down(iter->offset, objs_per_ptr); - iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; + iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page; if (!iter->offset) return NULL; @@ -267,7 +256,7 @@ restart: n = n->children[i]; } - return &n->data[iter->offset & (PAGE_SIZE - 1)]; + return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)]; } EXPORT_SYMBOL(__genradix_iter_peek_prev); @@ -289,7 +278,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size, { size_t offset; - for (offset = 0; offset < size; offset += PAGE_SIZE) + for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE) if (!__genradix_ptr_alloc(radix, offset, gfp_mask)) return -ENOMEM; diff --git a/lib/group_cpus.c b/lib/group_cpus.c index aa3f6815bb12..ee272c4cefcc 100644 --- a/lib/group_cpus.c +++ b/lib/group_cpus.c @@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps) if (!masks) goto fail_node_to_cpumask; - /* Stabilize the cpumasks */ - cpus_read_lock(); build_node_to_cpumask(node_to_cpumask); + /* + * Make a local cache of 'cpu_present_mask', so the two stages + * spread can observe consistent 'cpu_present_mask' without holding + * cpu hotplug lock, then we can reduce deadlock risk with cpu + * hotplug code. + * + * Here CPU hotplug may happen when reading `cpu_present_mask`, and + * we can live with the case because it only affects that hotplug + * CPU is handled in the 1st or 2nd stage, and either way is correct + * from API user viewpoint since 2-stage spread is sort of + * optimization. 
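For illustration (not part of the patch): the comment above explains why group_cpus.c now snapshots cpu_present_mask instead of holding the CPU hotplug lock across the two-stage spread. For context, a rough sketch of how a caller typically consumes group_cpus_evenly(); the function and variable names below are hypothetical, only group_cpus_evenly() itself and the caller-owned kfree() of its result reflect the API shown in this hunk.

#include <linux/group_cpus.h>
#include <linux/slab.h>

/* Hypothetical: spread nr_queues queues across CPUs and log the groups. */
static int assign_queue_cpus(unsigned int nr_queues)
{
	struct cpumask *masks = group_cpus_evenly(nr_queues);
	unsigned int q;

	if (!masks)
		return -ENOMEM;

	for (q = 0; q < nr_queues; q++)
		pr_info("queue %u -> CPUs %*pbl\n",
			q, cpumask_pr_args(&masks[q]));

	kfree(masks);		/* the caller owns the returned array */
	return 0;
}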
+ */ + cpumask_copy(npresmsk, data_race(cpu_present_mask)); + /* grouping present CPUs first */ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask, - cpu_present_mask, nmsk, masks); + npresmsk, nmsk, masks); if (ret < 0) goto fail_build_affinity; nr_present = ret; @@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps) curgrp = 0; else curgrp = nr_present; - cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask); + cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk); ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask, npresmsk, nmsk, masks); if (ret >= 0) nr_others = ret; fail_build_affinity: - cpus_read_unlock(); - if (ret >= 0) WARN_ON(nr_present + nr_others < numgrps); diff --git a/lib/idr.c b/lib/idr.c index 13f2758c2377..da36054c3ca0 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -508,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id) goto delete; xas_store(&xas, xa_mk_value(v)); } else { - if (!test_bit(bit, bitmap->bitmap)) + if (!bitmap || !test_bit(bit, bitmap->bitmap)) goto err; __clear_bit(bit, bitmap->bitmap); xas_set_mark(&xas, XA_FREE_MARK); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index de7d11cf4c63..4a6a9f419bd7 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -166,7 +166,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter) { .iter_type = ITER_IOVEC, - .copy_mc = false, .nofault = false, .data_source = direction, .__iov = iov, @@ -245,26 +244,8 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ static __always_inline -size_t memcpy_from_iter_mc(void *iter_from, size_t progress, - size_t len, void *to, void *priv2) -{ - return copy_mc_to_kernel(to + progress, iter_from, len); -} - -static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i) -{ - if (unlikely(i->count < bytes)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc); -} - -static __always_inline size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_copy_mc(i))) - return __copy_from_iter_mc(addr, bytes, i); return iterate_and_advance(i, bytes, addr, copy_from_user_iter, memcpy_from_iter); } @@ -409,7 +390,7 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte void *kaddr = kmap_local_page(page); size_t n = min(bytes, (size_t)PAGE_SIZE - offset); - n = iterate_and_advance(i, bytes, kaddr, + n = iterate_and_advance(i, n, kaddr + offset, copy_to_user_iter_nofault, memcpy_to_iter); kunmap_local(kaddr); @@ -633,7 +614,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_KVEC, - .copy_mc = false, .data_source = direction, .kvec = kvec, .nr_segs = nr_segs, @@ -650,7 +630,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_BVEC, - .copy_mc = false, .data_source = direction, .bvec = bvec, .nr_segs = nr_segs, @@ -679,7 +658,6 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, BUG_ON(direction & ~1); *i = (struct iov_iter) { .iter_type = ITER_XARRAY, - .copy_mc = false, .data_source = direction, .xarray = xarray, .xarray_start = start, @@ -703,7 +681,6 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) BUG_ON(direction != READ); *i = (struct iov_iter){ .iter_type = 
ITER_DISCARD, - .copy_mc = false, .data_source = false, .count = count, .iov_offset = 0 @@ -714,12 +691,11 @@ EXPORT_SYMBOL(iov_iter_discard); static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask) { + const struct iovec *iov = iter_iov(i); size_t size = i->count; size_t skip = i->iov_offset; - unsigned k; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - const struct iovec *iov = iter_iov(i) + k; + do { size_t len = iov->iov_len - skip; if (len > size) @@ -729,34 +705,36 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, if ((unsigned long)(iov->iov_base + skip) & addr_mask) return false; + iov++; size -= len; - if (!size) - break; - } + skip = 0; + } while (size); + return true; } static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask) { - size_t size = i->count; + const struct bio_vec *bvec = i->bvec; unsigned skip = i->iov_offset; - unsigned k; + size_t size = i->count; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - size_t len = i->bvec[k].bv_len - skip; + do { + size_t len = bvec->bv_len; if (len > size) len = size; if (len & len_mask) return false; - if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) + if ((unsigned long)(bvec->bv_offset + skip) & addr_mask) return false; + bvec++; size -= len; - if (!size) - break; - } + skip = 0; + } while (size); + return true; } @@ -800,13 +778,12 @@ EXPORT_SYMBOL_GPL(iov_iter_is_aligned); static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) { + const struct iovec *iov = iter_iov(i); unsigned long res = 0; size_t size = i->count; size_t skip = i->iov_offset; - unsigned k; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - const struct iovec *iov = iter_iov(i) + k; + do { size_t len = iov->iov_len - skip; if (len) { res |= (unsigned long)iov->iov_base + skip; @@ -814,30 +791,31 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) len = size; res |= len; size -= len; - if (!size) - break; } - } + iov++; + skip = 0; + } while (size); return res; } static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) { + const struct bio_vec *bvec = i->bvec; unsigned res = 0; size_t size = i->count; unsigned skip = i->iov_offset; - unsigned k; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - size_t len = i->bvec[k].bv_len - skip; - res |= (unsigned long)i->bvec[k].bv_offset + skip; + do { + size_t len = bvec->bv_len - skip; + res |= (unsigned long)bvec->bv_offset + skip; if (len > size) len = size; res |= len; + bvec++; size -= len; - if (!size) - break; - } + skip = 0; + } while (size); + return res; } @@ -1166,11 +1144,12 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) EXPORT_SYMBOL(dup_iter); static __noclone int copy_compat_iovec_from_user(struct iovec *iov, - const struct iovec __user *uvec, unsigned long nr_segs) + const struct iovec __user *uvec, u32 nr_segs) { const struct compat_iovec __user *uiov = (const struct compat_iovec __user *)uvec; - int ret = -EFAULT, i; + int ret = -EFAULT; + u32 i; if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) return -EFAULT; @@ -1369,19 +1348,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec, } EXPORT_SYMBOL(import_iovec); -int import_single_range(int rw, void __user *buf, size_t len, - struct iovec *iov, struct iov_iter *i) -{ - if (len > MAX_RW_COUNT) - len = MAX_RW_COUNT; - if (unlikely(!access_ok(buf, len))) - return -EFAULT; - - iov_iter_ubuf(i, rw, buf, len); - return 0; -} 
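For illustration (not part of the patch): with import_single_range() removed above, the single-user-buffer case is handled by the remaining import_ubuf() helper. A hedged sketch of the replacement pattern; the wrapper name and parameters are hypothetical.

#include <linux/uio.h>

/* Hypothetical helper: build an iov_iter over one user buffer that data
 * will be copied into (ITER_DEST), as import_single_range() callers did. */
static int map_user_buf(void __user *buf, size_t len, struct iov_iter *to)
{
	/* Like the removed helper, import_ubuf() clamps len to MAX_RW_COUNT. */
	return import_ubuf(ITER_DEST, buf, len, to);
}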
-EXPORT_SYMBOL(import_single_range); - int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i) { if (len > MAX_RW_COUNT) diff --git a/lib/kobject.c b/lib/kobject.c index 59dbcbdb1c91..72fa20f405f1 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -74,10 +74,12 @@ static int create_dir(struct kobject *kobj) if (error) return error; - error = sysfs_create_groups(kobj, ktype->default_groups); - if (error) { - sysfs_remove_dir(kobj); - return error; + if (ktype) { + error = sysfs_create_groups(kobj, ktype->default_groups); + if (error) { + sysfs_remove_dir(kobj); + return error; + } } /* @@ -589,7 +591,8 @@ static void __kobject_del(struct kobject *kobj) sd = kobj->sd; ktype = get_ktype(kobj); - sysfs_remove_groups(kobj, ktype->default_groups); + if (ktype) + sysfs_remove_groups(kobj, ktype->default_groups); /* send "remove" if the caller did not do it but sent "add" */ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) { @@ -666,6 +669,10 @@ static void kobject_cleanup(struct kobject *kobj) pr_debug("'%s' (%p): %s, parent %p\n", kobject_name(kobj), kobj, __func__, kobj->parent); + if (t && !t->release) + pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", + kobject_name(kobj), kobj); + /* remove from sysfs if the caller did not do it */ if (kobj->state_in_sysfs) { pr_debug("'%s' (%p): auto cleanup kobject_del\n", @@ -676,13 +683,10 @@ static void kobject_cleanup(struct kobject *kobj) parent = NULL; } - if (t->release) { + if (t && t->release) { pr_debug("'%s' (%p): calling ktype release\n", kobject_name(kobj), kobj); t->release(kobj); - } else { - pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", - kobject_name(kobj), kobj); } /* free name if we allocated it */ @@ -1056,7 +1060,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa { const struct kobj_ns_type_operations *ops = NULL; - if (parent && parent->ktype->child_ns_type) + if (parent && parent->ktype && parent->ktype->child_ns_type) ops = parent->ktype->child_ns_type(parent); return ops; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index fb9a2f06dd1e..03b427e2707e 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -30,7 +30,7 @@ #include <net/net_namespace.h> -u64 uevent_seqnum; +atomic64_t uevent_seqnum; #ifdef CONFIG_UEVENT_HELPER char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; #endif @@ -42,10 +42,9 @@ struct uevent_sock { #ifdef CONFIG_NET static LIST_HEAD(uevent_sock_list); -#endif - -/* This lock protects uevent_seqnum and uevent_sock_list */ +/* This lock protects uevent_sock_list */ static DEFINE_MUTEX(uevent_sock_mutex); +#endif /* the strings here must match the enum in include/linux/kobject.h */ static const char *kobject_actions[] = { @@ -315,6 +314,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env, int retval = 0; /* send netlink message */ + mutex_lock(&uevent_sock_mutex); list_for_each_entry(ue_sk, &uevent_sock_list, list) { struct sock *uevent_sock = ue_sk->sk; @@ -334,6 +334,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env, if (retval == -ENOBUFS || retval == -ESRCH) retval = 0; } + mutex_unlock(&uevent_sock_mutex); consume_skb(skb); return retval; @@ -583,16 +584,14 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, break; } - mutex_lock(&uevent_sock_mutex); /* we will 
send an event, so request a new sequence number */ - retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum); - if (retval) { - mutex_unlock(&uevent_sock_mutex); + retval = add_uevent_var(env, "SEQNUM=%llu", + atomic64_inc_return(&uevent_seqnum)); + if (retval) goto exit; - } + retval = kobject_uevent_net_broadcast(kobj, env, action_string, devpath); - mutex_unlock(&uevent_sock_mutex); #ifdef CONFIG_UEVENT_HELPER /* call uevent_helper, usually only enabled during early boot */ @@ -688,7 +687,8 @@ static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb, int ret; /* bump and prepare sequence number */ - ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum); + ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", + atomic64_inc_return(&uevent_seqnum)); if (ret < 0 || (size_t)ret >= sizeof(buf)) return -ENOMEM; ret++; @@ -742,9 +742,7 @@ static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh, return -EPERM; } - mutex_lock(&uevent_sock_mutex); ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack); - mutex_unlock(&uevent_sock_mutex); return ret; } diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig index 68a6daec0aef..34d7242d526d 100644 --- a/lib/kunit/Kconfig +++ b/lib/kunit/Kconfig @@ -24,6 +24,17 @@ config KUNIT_DEBUGFS test suite, which allow users to see results of the last test suite run that occurred. +config KUNIT_FAULT_TEST + bool "Enable KUnit tests which print BUG stacktraces" + depends on KUNIT_TEST + depends on !UML + default y + help + Enables fault handling tests for the KUnit framework. These tests may + trigger a kernel BUG(), and the associated stack trace, even when they + pass. If this conflicts with your test infrastructure (or is confusing + or annoying), they can be disabled by setting this to N.
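For illustration (not part of the patch): earlier in this hunk, kobject_uevent.c switches uevent_seqnum from a mutex-protected u64 to an atomic64_t so that sequence numbers no longer need uevent_sock_mutex. A minimal sketch of that pattern, using a hypothetical counter name; only atomic64_inc_return() itself is taken from the change.

#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t my_seqnum = ATOMIC64_INIT(0);

static u64 my_next_seqnum(void)
{
	/* Every caller gets a unique, monotonically increasing value without
	 * taking any lock around the increment. */
	return atomic64_inc_return(&my_seqnum);
}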
+ config KUNIT_TEST tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS default KUNIT_ALL_TESTS diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile index 46f75f23dfe4..309659a32a78 100644 --- a/lib/kunit/Makefile +++ b/lib/kunit/Makefile @@ -7,7 +7,8 @@ kunit-objs += test.o \ assert.o \ try-catch.o \ executor.o \ - attributes.o + attributes.o \ + device.o ifeq ($(CONFIG_KUNIT_DEBUGFS),y) kunit-objs += debugfs.o diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c index 1b512f7e1838..2cf04cc09372 100644 --- a/lib/kunit/attributes.c +++ b/lib/kunit/attributes.c @@ -58,6 +58,16 @@ static const char *attr_enum_to_string(void *attr, const char * const str_list[] return str_list[val]; } +static const char *attr_bool_to_string(void *attr, bool *to_free) +{ + bool val = (bool)attr; + + *to_free = false; + if (val) + return "true"; + return "false"; +} + static const char *attr_speed_to_string(void *attr, bool *to_free) { return attr_enum_to_string(attr, speed_str_list, to_free); @@ -166,6 +176,37 @@ static int attr_string_filter(void *attr, const char *input, int *err) return false; } +static int attr_bool_filter(void *attr, const char *input, int *err) +{ + int i, input_int = -1; + long val = (long)attr; + const char *input_str = NULL; + + for (i = 0; input[i]; i++) { + if (!strchr(op_list, input[i])) { + input_str = input + i; + break; + } + } + + if (!input_str) { + *err = -EINVAL; + pr_err("kunit executor: filter value not found: %s\n", input); + return false; + } + + if (!strcmp(input_str, "true")) + input_int = (int)true; + else if (!strcmp(input_str, "false")) + input_int = (int)false; + else { + *err = -EINVAL; + pr_err("kunit executor: invalid filter input: %s\n", input); + return false; + } + + return int_filter(val, input, input_int, err); +} /* Get Attribute Methods */ @@ -194,6 +235,17 @@ static void *attr_module_get(void *test_or_suite, bool is_test) return (void *) ""; } +static void *attr_is_init_get(void *test_or_suite, bool is_test) +{ + struct kunit_suite *suite = is_test ? NULL : test_or_suite; + struct kunit_case *test = is_test ? 
test_or_suite : NULL; + + if (test) + return ((void *) NULL); + else + return ((void *) suite->is_init); +} + /* List of all Test Attributes */ static struct kunit_attr kunit_attr_list[] = { @@ -212,6 +264,14 @@ static struct kunit_attr kunit_attr_list[] = { .filter = attr_string_filter, .attr_default = (void *)"", .print = PRINT_SUITE, + }, + { + .name = "is_init", + .get_attr = attr_is_init_get, + .to_string = attr_bool_to_string, + .filter = attr_bool_filter, + .attr_default = (void *)false, + .print = PRINT_SUITE, } }; diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c index 270d185737e6..d548750a325a 100644 --- a/lib/kunit/debugfs.c +++ b/lib/kunit/debugfs.c @@ -8,12 +8,14 @@ #include <linux/module.h> #include <kunit/test.h> +#include <kunit/test-bug.h> #include "string-stream.h" #include "debugfs.h" #define KUNIT_DEBUGFS_ROOT "kunit" #define KUNIT_DEBUGFS_RESULTS "results" +#define KUNIT_DEBUGFS_RUN "run" /* * Create a debugfs representation of test suites: @@ -21,6 +23,8 @@ * Path Semantics * /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for * testsuite + * /sys/kernel/debug/kunit/<testsuite>/run Write to this file to trigger + * testsuite to run * */ @@ -60,12 +64,14 @@ static void debugfs_print_result(struct seq_file *seq, struct string_stream *log static int debugfs_print_results(struct seq_file *seq, void *v) { struct kunit_suite *suite = (struct kunit_suite *)seq->private; - enum kunit_status success = kunit_suite_has_succeeded(suite); + enum kunit_status success; struct kunit_case *test_case; if (!suite) return 0; + success = kunit_suite_has_succeeded(suite); + /* Print KTAP header so the debugfs log can be parsed as valid KTAP. */ seq_puts(seq, "KTAP version 1\n"); seq_puts(seq, "1..1\n"); @@ -99,6 +105,51 @@ static int debugfs_results_open(struct inode *inode, struct file *file) return single_open(file, debugfs_print_results, suite); } +/* + * Print a usage message to the debugfs "run" file + * (/sys/kernel/debug/kunit/<testsuite>/run) if opened. + */ +static int debugfs_print_run(struct seq_file *seq, void *v) +{ + struct kunit_suite *suite = (struct kunit_suite *)seq->private; + + seq_puts(seq, "Write to this file to trigger the test suite to run.\n"); + seq_printf(seq, "usage: echo \"any string\" > /sys/kernel/debugfs/kunit/%s/run\n", + suite->name); + return 0; +} + +/* + * The debugfs "run" file (/sys/kernel/debug/kunit/<testsuite>/run) + * contains no information. Write to the file to trigger the test suite + * to run. + */ +static int debugfs_run_open(struct inode *inode, struct file *file) +{ + struct kunit_suite *suite; + + suite = (struct kunit_suite *)inode->i_private; + + return single_open(file, debugfs_print_run, suite); +} + +/* + * Trigger a test suite to run by writing to the suite's "run" debugfs + * file found at: /sys/kernel/debug/kunit/<testsuite>/run + * + * Note: what is written to this file will not be saved. 
+ */ +static ssize_t debugfs_run(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct inode *f_inode = file->f_inode; + struct kunit_suite *suite = (struct kunit_suite *) f_inode->i_private; + + __kunit_test_suites_init(&suite, 1); + + return count; +} + static const struct file_operations debugfs_results_fops = { .open = debugfs_results_open, .read = seq_read, @@ -106,17 +157,43 @@ static const struct file_operations debugfs_results_fops = { .release = debugfs_release, }; +static const struct file_operations debugfs_run_fops = { + .open = debugfs_run_open, + .read = seq_read, + .write = debugfs_run, + .llseek = seq_lseek, + .release = debugfs_release, +}; + void kunit_debugfs_create_suite(struct kunit_suite *suite) { struct kunit_case *test_case; + struct string_stream *stream; + + /* If suite log already allocated, do not create new debugfs files. */ + if (suite->log) + return; - /* Allocate logs before creating debugfs representation. */ - suite->log = alloc_string_stream(GFP_KERNEL); - string_stream_set_append_newlines(suite->log, true); + /* + * Allocate logs before creating debugfs representation. + * The suite->log and test_case->log pointer are expected to be NULL + * if there isn't a log, so only set it if the log stream was created + * successfully. + */ + stream = alloc_string_stream(GFP_KERNEL); + if (IS_ERR_OR_NULL(stream)) + return; + + string_stream_set_append_newlines(stream, true); + suite->log = stream; kunit_suite_for_each_test_case(suite, test_case) { - test_case->log = alloc_string_stream(GFP_KERNEL); - string_stream_set_append_newlines(test_case->log, true); + stream = alloc_string_stream(GFP_KERNEL); + if (IS_ERR_OR_NULL(stream)) + goto err; + + string_stream_set_append_newlines(stream, true); + test_case->log = stream; } suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir); @@ -124,6 +201,19 @@ void kunit_debugfs_create_suite(struct kunit_suite *suite) debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444, suite->debugfs, suite, &debugfs_results_fops); + + /* Do not create file to re-run test if test runs on init */ + if (!suite->is_init) { + debugfs_create_file(KUNIT_DEBUGFS_RUN, S_IFREG | 0644, + suite->debugfs, + suite, &debugfs_run_fops); + } + return; + +err: + string_stream_destroy(suite->log); + kunit_suite_for_each_test_case(suite, test_case) + string_stream_destroy(test_case->log); } void kunit_debugfs_destroy_suite(struct kunit_suite *suite) diff --git a/lib/kunit/device-impl.h b/lib/kunit/device-impl.h new file mode 100644 index 000000000000..5fcd48ff0f36 --- /dev/null +++ b/lib/kunit/device-impl.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * KUnit internal header for device helpers + * + * Header for KUnit-internal driver / bus management. + * + * Copyright (C) 2023, Google LLC. + * Author: David Gow <davidgow@google.com> + */ + +#ifndef _KUNIT_DEVICE_IMPL_H +#define _KUNIT_DEVICE_IMPL_H + +// For internal use only -- registers the kunit_bus. +int kunit_bus_init(void); +// For internal use only -- unregisters the kunit_bus. +void kunit_bus_shutdown(void); + +#endif //_KUNIT_DEVICE_IMPL_H diff --git a/lib/kunit/device.c b/lib/kunit/device.c new file mode 100644 index 000000000000..25c81ed465fb --- /dev/null +++ b/lib/kunit/device.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit-managed device implementation + * + * Implementation of struct kunit_device helpers for fake devices whose + * lifecycle is managed by KUnit. + * + * Copyright (C) 2023, Google LLC. 
+ * Author: David Gow <davidgow@google.com> + */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> + +#include <kunit/test.h> +#include <kunit/device.h> +#include <kunit/resource.h> + +#include "device-impl.h" + +/* Wrappers for use with kunit_add_action() */ +KUNIT_DEFINE_ACTION_WRAPPER(device_unregister_wrapper, device_unregister, struct device *); +KUNIT_DEFINE_ACTION_WRAPPER(driver_unregister_wrapper, driver_unregister, struct device_driver *); + +/* The root device for the KUnit bus, parent of all kunit_devices. */ +static struct device *kunit_bus_device; + +/* A device owned by a KUnit test. */ +struct kunit_device { + struct device dev; + /* The KUnit test which owns this device. */ + struct kunit *owner; + /* If the driver is managed by KUnit and unique to this device. */ + const struct device_driver *driver; +}; + +#define to_kunit_device(d) container_of_const(d, struct kunit_device, dev) + +static const struct bus_type kunit_bus_type = { + .name = "kunit", +}; + +/* Register the 'kunit_bus' used for fake devices. */ +int kunit_bus_init(void) +{ + int error; + + kunit_bus_device = root_device_register("kunit"); + if (IS_ERR(kunit_bus_device)) + return PTR_ERR(kunit_bus_device); + + error = bus_register(&kunit_bus_type); + if (error) + root_device_unregister(kunit_bus_device); + return error; +} + +/* Unregister the 'kunit_bus' in case the KUnit module is unloaded. */ +void kunit_bus_shutdown(void) +{ + /* Make sure the bus exists before we unregister it. */ + if (IS_ERR_OR_NULL(kunit_bus_device)) + return; + + bus_unregister(&kunit_bus_type); + + root_device_unregister(kunit_bus_device); + + kunit_bus_device = NULL; +} + +/* Release a 'fake' KUnit device. */ +static void kunit_device_release(struct device *d) +{ + kfree(to_kunit_device(d)); +} + +/* + * Create and register a KUnit-managed struct device_driver on the kunit_bus. + * Returns an error pointer on failure. 
+ */ +struct device_driver *kunit_driver_create(struct kunit *test, const char *name) +{ + struct device_driver *driver; + int err = -ENOMEM; + + driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL); + + if (!driver) + return ERR_PTR(err); + + driver->name = name; + driver->bus = &kunit_bus_type; + driver->owner = THIS_MODULE; + + err = driver_register(driver); + if (err) { + kunit_kfree(test, driver); + return ERR_PTR(err); + } + + kunit_add_action(test, driver_unregister_wrapper, driver); + return driver; +} +EXPORT_SYMBOL_GPL(kunit_driver_create); + +/* Helper which creates a kunit_device, attaches it to the kunit_bus*/ +static struct kunit_device *kunit_device_register_internal(struct kunit *test, + const char *name, + const struct device_driver *drv) +{ + struct kunit_device *kunit_dev; + int err = -ENOMEM; + + kunit_dev = kzalloc(sizeof(*kunit_dev), GFP_KERNEL); + if (!kunit_dev) + return ERR_PTR(err); + + kunit_dev->owner = test; + + err = dev_set_name(&kunit_dev->dev, "%s.%s", test->name, name); + if (err) { + kfree(kunit_dev); + return ERR_PTR(err); + } + + kunit_dev->dev.release = kunit_device_release; + kunit_dev->dev.bus = &kunit_bus_type; + kunit_dev->dev.parent = kunit_bus_device; + + err = device_register(&kunit_dev->dev); + if (err) { + put_device(&kunit_dev->dev); + return ERR_PTR(err); + } + + kunit_dev->dev.dma_mask = &kunit_dev->dev.coherent_dma_mask; + kunit_dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + + kunit_add_action(test, device_unregister_wrapper, &kunit_dev->dev); + + return kunit_dev; +} + +/* + * Create and register a new KUnit-managed device, using the user-supplied device_driver. + * On failure, returns an error pointer. + */ +struct device *kunit_device_register_with_driver(struct kunit *test, + const char *name, + const struct device_driver *drv) +{ + struct kunit_device *kunit_dev = kunit_device_register_internal(test, name, drv); + + if (IS_ERR_OR_NULL(kunit_dev)) + return ERR_CAST(kunit_dev); + + return &kunit_dev->dev; +} +EXPORT_SYMBOL_GPL(kunit_device_register_with_driver); + +/* + * Create and register a new KUnit-managed device, including a matching device_driver. + * On failure, returns an error pointer. + */ +struct device *kunit_device_register(struct kunit *test, const char *name) +{ + struct device_driver *drv; + struct kunit_device *dev; + + drv = kunit_driver_create(test, name); + if (IS_ERR(drv)) + return ERR_CAST(drv); + + dev = kunit_device_register_internal(test, name, drv); + if (IS_ERR(dev)) { + kunit_release_action(test, driver_unregister_wrapper, (void *)drv); + return ERR_CAST(dev); + } + + /* Request the driver be freed. */ + dev->driver = drv; + + + return &dev->dev; +} +EXPORT_SYMBOL_GPL(kunit_device_register); + +/* Unregisters a KUnit-managed device early (including the driver, if automatically created). 
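For illustration (not part of the patch): the device.c helpers above give a test a fake struct device parented on the kunit bus. A short usage sketch; the test name and allocation size are hypothetical, the calls mirror the API added above.

#include <kunit/test.h>
#include <kunit/device.h>
#include <linux/device.h>

static void my_fake_device_test(struct kunit *test)
{
	/* Registered on the kunit_bus; torn down automatically at test exit. */
	struct device *dev = kunit_device_register(test, "fake-dev");

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/* devm resources attach to the fake device as they would to a real one. */
	KUNIT_ASSERT_NOT_NULL(test, devm_kzalloc(dev, 16, GFP_KERNEL));

	/* Or tear it down early, as kunit_device_unregister() above allows. */
	kunit_device_unregister(test, dev);
}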
*/ +void kunit_device_unregister(struct kunit *test, struct device *dev) +{ + const struct device_driver *driver = to_kunit_device(dev)->driver; + + kunit_release_action(test, device_unregister_wrapper, dev); + if (driver) + kunit_release_action(test, driver_unregister_wrapper, (void *)driver); +} +EXPORT_SYMBOL_GPL(kunit_device_unregister); + diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 1236b3cd2fbb..70b9a43cd257 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -12,6 +12,8 @@ */ extern struct kunit_suite * const __kunit_suites_start[]; extern struct kunit_suite * const __kunit_suites_end[]; +extern struct kunit_suite * const __kunit_init_suites_start[]; +extern struct kunit_suite * const __kunit_init_suites_end[]; static char *action_param; @@ -31,13 +33,13 @@ static char *filter_glob_param; static char *filter_param; static char *filter_action_param; -module_param_named(filter_glob, filter_glob_param, charp, 0400); +module_param_named(filter_glob, filter_glob_param, charp, 0600); MODULE_PARM_DESC(filter_glob, "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test"); -module_param_named(filter, filter_param, charp, 0400); +module_param_named(filter, filter_param, charp, 0600); MODULE_PARM_DESC(filter, "Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow"); -module_param_named(filter_action, filter_action_param, charp, 0400); +module_param_named(filter_action, filter_action_param, charp, 0600); MODULE_PARM_DESC(filter_action, "Changes behavior of filtered tests using attributes, valid values are:\n" "<none>: do not run filtered tests as normal\n" @@ -144,6 +146,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set) kfree(suite_set.start); } +/* + * Filter and reallocate test suites. Must return the filtered test suites set + * allocated at a valid virtual address or NULL in case of error. 
+ */ struct kunit_suite_set kunit_filter_suites(const struct kunit_suite_set *suite_set, const char *filter_glob, @@ -292,6 +298,37 @@ void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr) } } +struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set, + struct kunit_suite_set suite_set) +{ + struct kunit_suite_set total_suite_set = {NULL, NULL}; + struct kunit_suite **total_suite_start = NULL; + size_t init_num_suites, num_suites, suite_size; + int i = 0; + + init_num_suites = init_suite_set.end - init_suite_set.start; + num_suites = suite_set.end - suite_set.start; + suite_size = sizeof(suite_set.start); + + /* Allocate memory for array of all kunit suites */ + total_suite_start = kmalloc_array(init_num_suites + num_suites, suite_size, GFP_KERNEL); + if (!total_suite_start) + return total_suite_set; + + /* Append and mark init suites and then append all other kunit suites */ + memcpy(total_suite_start, init_suite_set.start, init_num_suites * suite_size); + for (i = 0; i < init_num_suites; i++) + total_suite_start[i]->is_init = true; + + memcpy(total_suite_start + init_num_suites, suite_set.start, num_suites * suite_size); + + /* Set kunit suite set start and end */ + total_suite_set.start = total_suite_start; + total_suite_set.end = total_suite_start + (init_num_suites + num_suites); + + return total_suite_set; +} + #if IS_BUILTIN(CONFIG_KUNIT) static char *kunit_shutdown; @@ -313,21 +350,41 @@ static void kunit_handle_shutdown(void) int kunit_run_all_tests(void) { - struct kunit_suite_set suite_set = { + struct kunit_suite_set suite_set = {NULL, NULL}; + struct kunit_suite_set filtered_suite_set = {NULL, NULL}; + struct kunit_suite_set init_suite_set = { + __kunit_init_suites_start, __kunit_init_suites_end, + }; + struct kunit_suite_set normal_suite_set = { __kunit_suites_start, __kunit_suites_end, }; + size_t init_num_suites = init_suite_set.end - init_suite_set.start; int err = 0; + + if (init_num_suites > 0) { + suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set); + if (!suite_set.start) + goto out; + } else + suite_set = normal_suite_set; + if (!kunit_enabled()) { pr_info("kunit: disabled\n"); - goto out; + goto free_out; } if (filter_glob_param || filter_param) { - suite_set = kunit_filter_suites(&suite_set, filter_glob_param, + filtered_suite_set = kunit_filter_suites(&suite_set, filter_glob_param, filter_param, filter_action_param, &err); + + /* Free original suite set before using filtered suite set */ + if (init_num_suites > 0) + kfree(suite_set.start); + suite_set = filtered_suite_set; + if (err) { pr_err("kunit executor: error filtering suites: %d\n", err); - goto out; + goto free_out; } } @@ -340,9 +397,12 @@ int kunit_run_all_tests(void) else pr_err("kunit executor: unknown action '%s'\n", action_param); - if (filter_glob_param || filter_param) { /* a copy was made of each suite */ +free_out: + if (filter_glob_param || filter_param) kunit_free_suite_set(suite_set); - } + else if (init_num_suites > 0) + /* Don't use kunit_free_suite_set because suites aren't individually allocated */ + kfree(suite_set.start); out: kunit_handle_shutdown(); diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 22d4ee86dbed..3f7f967e3688 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -129,7 +129,7 @@ static void parse_filter_attr_test(struct kunit *test) GFP_KERNEL); for (j = 0; j < filter_count; j++) { parsed_filters[j] = kunit_next_attr_filter(&filter, &err); - 
KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]); + KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters); } KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed"); diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c index 6bb5c2ef6696..798924f7cc86 100644 --- a/lib/kunit/kunit-example-test.c +++ b/lib/kunit/kunit-example-test.c @@ -169,6 +169,16 @@ static int subtract_one(int i) } /* + * If the function to be replaced is static within a module it is + * useful to export a pointer to that function instead of having + * to change the static function to a non-static exported function. + * + * This pointer simulates a module exporting a pointer to a static + * function. + */ +static int (* const add_one_fn_ptr)(int i) = add_one; + +/* * This test shows the use of static stubs. */ static void example_static_stub_test(struct kunit *test) @@ -187,6 +197,30 @@ static void example_static_stub_test(struct kunit *test) KUNIT_EXPECT_EQ(test, add_one(1), 2); } +/* + * This test shows the use of static stubs when the function being + * replaced is provided as a pointer-to-function instead of the + * actual function. This is useful for providing access to static + * functions in a module by exporting a pointer to that function + * instead of having to change the static function to a non-static + * exported function. + */ +static void example_static_stub_using_fn_ptr_test(struct kunit *test) +{ + /* By default, function is not stubbed. */ + KUNIT_EXPECT_EQ(test, add_one(1), 2); + + /* Replace add_one() with subtract_one(). */ + kunit_activate_static_stub(test, add_one_fn_ptr, subtract_one); + + /* add_one() is now replaced. */ + KUNIT_EXPECT_EQ(test, add_one(1), 0); + + /* Return add_one() to normal. */ + kunit_deactivate_static_stub(test, add_one_fn_ptr); + KUNIT_EXPECT_EQ(test, add_one(1), 2); +} + static const struct example_param { int value; } example_params_array[] = { @@ -222,6 +256,20 @@ static void example_params_test(struct kunit *test) } /* + * This test shows the use of test->priv. + */ +static void example_priv_test(struct kunit *test) +{ + /* unless setup in suite->init(), test->priv is NULL */ + KUNIT_ASSERT_NULL(test, test->priv); + + /* but can be used to pass arbitrary data to other functions */ + test->priv = kunit_kzalloc(test, 1, GFP_KERNEL); + KUNIT_EXPECT_NOT_NULL(test, test->priv); + KUNIT_ASSERT_PTR_EQ(test, test->priv, kunit_get_current_test()->priv); +} + +/* * This test should always pass. Can be used to practice filtering attributes. */ static void example_slow_test(struct kunit *test) @@ -245,6 +293,8 @@ static struct kunit_case example_test_cases[] = { KUNIT_CASE(example_mark_skipped_test), KUNIT_CASE(example_all_expect_macros_test), KUNIT_CASE(example_static_stub_test), + KUNIT_CASE(example_static_stub_using_fn_ptr_test), + KUNIT_CASE(example_priv_test), KUNIT_CASE_PARAM(example_params_test, example_gen_params), KUNIT_CASE_SLOW(example_slow_test), {} @@ -287,4 +337,41 @@ static struct kunit_suite example_test_suite = { */ kunit_test_suites(&example_test_suite); +static int __init init_add(int x, int y) +{ + return (x + y); +} + +/* + * This test should always pass. Can be used to test init suites. 
+ */ +static void __init example_init_test(struct kunit *test) +{ + KUNIT_EXPECT_EQ(test, init_add(1, 1), 2); +} + +/* + * The kunit_case struct cannot be marked as __initdata as this will be + * used in debugfs to retrieve results after test has run + */ +static struct kunit_case __refdata example_init_test_cases[] = { + KUNIT_CASE(example_init_test), + {} +}; + +/* + * The kunit_suite struct cannot be marked as __initdata as this will be + * used in debugfs to retrieve results after test has run + */ +static struct kunit_suite example_init_test_suite = { + .name = "example_init", + .test_cases = example_init_test_cases, +}; + +/* + * This registers the test suite and marks the suite as using init data + * and/or functions. + */ +kunit_test_init_section_suites(&example_init_test_suite); + MODULE_LICENSE("GPL v2"); diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 99d2a3a528e1..e3412e0ca399 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -5,9 +5,13 @@ * Copyright (C) 2019, Google LLC. * Author: Brendan Higgins <brendanhiggins@google.com> */ +#include "linux/gfp_types.h" #include <kunit/test.h> #include <kunit/test-bug.h> +#include <linux/device.h> +#include <kunit/device.h> + #include "string-stream.h" #include "try-catch-impl.h" @@ -105,6 +109,48 @@ static struct kunit_suite kunit_try_catch_test_suite = { .test_cases = kunit_try_catch_test_cases, }; +#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST) + +static void kunit_test_null_dereference(void *data) +{ + struct kunit *test = data; + int *null = NULL; + + *null = 0; + + KUNIT_FAIL(test, "This line should never be reached\n"); +} + +static void kunit_test_fault_null_dereference(struct kunit *test) +{ + struct kunit_try_catch_test_context *ctx = test->priv; + struct kunit_try_catch *try_catch = ctx->try_catch; + + kunit_try_catch_init(try_catch, + test, + kunit_test_null_dereference, + kunit_test_catch); + kunit_try_catch_run(try_catch, test); + + KUNIT_EXPECT_EQ(test, try_catch->try_result, -EINTR); + KUNIT_EXPECT_TRUE(test, ctx->function_called); +} + +#endif /* CONFIG_KUNIT_FAULT_TEST */ + +static struct kunit_case kunit_fault_test_cases[] = { +#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST) + KUNIT_CASE(kunit_test_fault_null_dereference), +#endif /* CONFIG_KUNIT_FAULT_TEST */ + {} +}; + +static struct kunit_suite kunit_fault_test_suite = { + .name = "kunit_fault", + .init = kunit_try_catch_test_init, + .test_cases = kunit_fault_test_cases, +}; + /* * Context for testing test managed resources * is_resource_initialized is used to test arbitrary resources @@ -538,10 +584,7 @@ static struct kunit_suite kunit_resource_test_suite = { #if IS_BUILTIN(CONFIG_KUNIT_TEST) /* This avoids a cast warning if kfree() is passed direct to kunit_add_action(). 
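For illustration (not part of the patch): the kunit-test.c and string-stream-test.c hunks below replace hand-written wrapper functions with KUNIT_DEFINE_ACTION_WRAPPER(). A brief sketch of that pattern; the wrapper and test names are hypothetical.

#include <kunit/test.h>
#include <kunit/resource.h>
#include <linux/slab.h>

/* Generates a wrapper with the prototype kunit_add_action() expects, so
 * kfree() does not need a function-pointer cast. */
KUNIT_DEFINE_ACTION_WRAPPER(free_scratch, kfree, const void *);

static void my_scratch_test(struct kunit *test)
{
	char *scratch = kmalloc(32, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, scratch);
	/* scratch is now freed automatically when the test finishes. */
	KUNIT_ASSERT_EQ(test, kunit_add_action(test, free_scratch, scratch), 0);
}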
*/ -static void kfree_wrapper(void *p) -{ - kfree(p); -} +KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *); static void kunit_log_test(struct kunit *test) { @@ -562,7 +605,7 @@ static void kunit_log_test(struct kunit *test) KUNIT_EXPECT_TRUE(test, test->log->append_newlines); full_log = string_stream_get_string(test->log); - kunit_add_action(test, (kunit_action_t *)kfree, full_log); + kunit_add_action(test, kfree_wrapper, full_log); KUNIT_EXPECT_NOT_ERR_OR_NULL(test, strstr(full_log, "put this in log.")); KUNIT_EXPECT_NOT_ERR_OR_NULL(test, @@ -690,6 +733,134 @@ static struct kunit_case kunit_current_test_cases[] = { {} }; +static void test_dev_action(void *priv) +{ + *(void **)priv = (void *)1; +} + +static void kunit_device_test(struct kunit *test) +{ + struct device *test_device; + long action_was_run = 0; + + test_device = kunit_device_register(test, "my_device"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device); + + // Add an action to verify cleanup. + devm_add_action(test_device, test_dev_action, &action_was_run); + + KUNIT_EXPECT_EQ(test, action_was_run, 0); + + kunit_device_unregister(test, test_device); + + KUNIT_EXPECT_EQ(test, action_was_run, 1); +} + +static void kunit_device_cleanup_test(struct kunit *test) +{ + struct device *test_device; + long action_was_run = 0; + + test_device = kunit_device_register(test, "my_device"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device); + + /* Add an action to verify cleanup. */ + devm_add_action(test_device, test_dev_action, &action_was_run); + + KUNIT_EXPECT_EQ(test, action_was_run, 0); + + /* Force KUnit to run cleanup early. */ + kunit_cleanup(test); + + KUNIT_EXPECT_EQ(test, action_was_run, 1); +} + +struct driver_test_state { + bool driver_device_probed; + bool driver_device_removed; + long action_was_run; +}; + +static int driver_probe_hook(struct device *dev) +{ + struct kunit *test = kunit_get_current_test(); + struct driver_test_state *state = (struct driver_test_state *)test->priv; + + state->driver_device_probed = true; + return 0; +} + +static int driver_remove_hook(struct device *dev) +{ + struct kunit *test = kunit_get_current_test(); + struct driver_test_state *state = (struct driver_test_state *)test->priv; + + state->driver_device_removed = true; + return 0; +} + +static void kunit_device_driver_test(struct kunit *test) +{ + struct device_driver *test_driver; + struct device *test_device; + struct driver_test_state *test_state = kunit_kzalloc(test, sizeof(*test_state), GFP_KERNEL); + + test->priv = test_state; + test_driver = kunit_driver_create(test, "my_driver"); + + // This can fail with an error pointer. + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_driver); + + test_driver->probe = driver_probe_hook; + test_driver->remove = driver_remove_hook; + + test_device = kunit_device_register_with_driver(test, "my_device", test_driver); + + // This can fail with an error pointer. + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device); + + // Make sure the probe function was called. + KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed); + + // Add an action to verify cleanup. + devm_add_action(test_device, test_dev_action, &test_state->action_was_run); + + KUNIT_EXPECT_EQ(test, test_state->action_was_run, 0); + + kunit_device_unregister(test, test_device); + test_device = NULL; + + // Make sure the remove hook was called. + KUNIT_ASSERT_TRUE(test, test_state->driver_device_removed); + + // We're going to test this again. 
+ test_state->driver_device_probed = false; + + // The driver should not automatically be destroyed by + // kunit_device_unregister, so we can re-use it. + test_device = kunit_device_register_with_driver(test, "my_device", test_driver); + + // This can fail with an error pointer. + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device); + + // Probe was called again. + KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed); + + // Everything is automatically freed here. +} + +static struct kunit_case kunit_device_test_cases[] = { + KUNIT_CASE(kunit_device_test), + KUNIT_CASE(kunit_device_cleanup_test), + KUNIT_CASE(kunit_device_driver_test), + {} +}; + +static struct kunit_suite kunit_device_test_suite = { + .name = "kunit_device", + .test_cases = kunit_device_test_cases, +}; + static struct kunit_suite kunit_current_test_suite = { .name = "kunit_current", .test_cases = kunit_current_test_cases, @@ -697,6 +868,7 @@ static struct kunit_suite kunit_current_test_suite = { kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite, &kunit_log_test_suite, &kunit_status_test_suite, - &kunit_current_test_suite); + &kunit_current_test_suite, &kunit_device_test_suite, + &kunit_fault_test_suite); MODULE_LICENSE("GPL v2"); diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c index 06822766f29a..7511442ea98f 100644 --- a/lib/kunit/string-stream-test.c +++ b/lib/kunit/string-stream-test.c @@ -22,18 +22,10 @@ struct string_stream_test_priv { }; /* Avoids a cast warning if kfree() is passed direct to kunit_add_action(). */ -static void kfree_wrapper(void *p) -{ - kfree(p); -} +KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *); /* Avoids a cast warning if string_stream_destroy() is passed direct to kunit_add_action(). */ -static void cleanup_raw_stream(void *p) -{ - struct string_stream *stream = p; - - string_stream_destroy(stream); -} +KUNIT_DEFINE_ACTION_WRAPPER(cleanup_raw_stream, string_stream_destroy, struct string_stream *); static char *get_concatenated_string(struct kunit *test, struct string_stream *stream) { @@ -72,7 +64,7 @@ static void string_stream_unmanaged_init_test(struct kunit *test) KUNIT_EXPECT_EQ(test, stream->length, 0); KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments)); - KUNIT_EXPECT_EQ(test, stream->gfp, GFP_KERNEL); + KUNIT_EXPECT_TRUE(test, (stream->gfp == GFP_KERNEL)); KUNIT_EXPECT_FALSE(test, stream->append_newlines); KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream)); diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c index a6f3616c2048..54f4fdcbfac8 100644 --- a/lib/kunit/string-stream.c +++ b/lib/kunit/string-stream.c @@ -173,7 +173,7 @@ void string_stream_destroy(struct string_stream *stream) { KUNIT_STATIC_STUB_REDIRECT(string_stream_destroy, stream); - if (!stream) + if (IS_ERR_OR_NULL(stream)) return; string_stream_clear(stream); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index f2eb71f1a66c..b8514dbb337c 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -13,15 +13,20 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/mutex.h> #include <linux/panic.h> #include <linux/sched/debug.h> #include <linux/sched.h> +#include <linux/mm.h> #include "debugfs.h" +#include "device-impl.h" #include "hooks-impl.h" #include "string-stream.h" #include "try-catch-impl.h" +static DEFINE_MUTEX(kunit_run_lock); + /* * Hook to fail the current test and print an error message to the log. 
*/ @@ -338,6 +343,36 @@ void kunit_init_test(struct kunit *test, const char *name, struct string_stream } EXPORT_SYMBOL_GPL(kunit_init_test); +/* Only warn when a test takes more than twice the threshold */ +#define KUNIT_SPEED_WARNING_MULTIPLIER 2 + +/* Slow tests are defined as taking more than 1s */ +#define KUNIT_SPEED_SLOW_THRESHOLD_S 1 + +#define KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S \ + (KUNIT_SPEED_WARNING_MULTIPLIER * KUNIT_SPEED_SLOW_THRESHOLD_S) + +#define s_to_timespec64(s) ns_to_timespec64((s) * NSEC_PER_SEC) + +static void kunit_run_case_check_speed(struct kunit *test, + struct kunit_case *test_case, + struct timespec64 duration) +{ + struct timespec64 slow_thr = + s_to_timespec64(KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S); + enum kunit_speed speed = test_case->attr.speed; + + if (timespec64_compare(&duration, &slow_thr) < 0) + return; + + if (speed == KUNIT_SPEED_VERY_SLOW || speed == KUNIT_SPEED_SLOW) + return; + + kunit_warn(test, + "Test should be marked slow (runtime: %lld.%09lds)", + duration.tv_sec, duration.tv_nsec); +} + /* * Initializes and runs test case. Does not clean up or do post validations. */ @@ -345,6 +380,8 @@ static void kunit_run_case_internal(struct kunit *test, struct kunit_suite *suite, struct kunit_case *test_case) { + struct timespec64 start, end; + if (suite->init) { int ret; @@ -356,7 +393,13 @@ static void kunit_run_case_internal(struct kunit *test, } } + ktime_get_ts64(&start); + test_case->run_case(test); + + ktime_get_ts64(&end); + + kunit_run_case_check_speed(test, test_case, timespec64_sub(end, start)); } static void kunit_case_internal_cleanup(struct kunit *test) @@ -622,6 +665,7 @@ int kunit_run_tests(struct kunit_suite *suite) test.param_index++; test.status = KUNIT_SUCCESS; test.status_comment[0] = '\0'; + test.priv = NULL; } } @@ -654,6 +698,9 @@ static void kunit_init_suite(struct kunit_suite *suite) kunit_debugfs_create_suite(suite); suite->status_comment[0] = '\0'; suite->suite_init_err = 0; + + if (suite->log) + string_stream_clear(suite->log); } bool kunit_enabled(void) @@ -665,11 +712,21 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_ { unsigned int i; + if (num_suites == 0) + return 0; + if (!kunit_enabled() && num_suites > 0) { pr_info("kunit: disabled\n"); return 0; } + kunit_suite_counter = 1; + + /* Use mutex lock to guard against running tests concurrently. 
*/ + if (mutex_lock_interruptible(&kunit_run_lock)) { + pr_err("kunit: test interrupted\n"); + return -EINTR; + } static_branch_inc(&kunit_running); for (i = 0; i < num_suites; i++) { @@ -678,6 +735,7 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_ } static_branch_dec(&kunit_running); + mutex_unlock(&kunit_run_lock); return 0; } EXPORT_SYMBOL_GPL(__kunit_test_suites_init); @@ -696,36 +754,46 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites) for (i = 0; i < num_suites; i++) kunit_exit_suite(suites[i]); - - kunit_suite_counter = 1; } EXPORT_SYMBOL_GPL(__kunit_test_suites_exit); #ifdef CONFIG_MODULES static void kunit_module_init(struct module *mod) { - struct kunit_suite_set suite_set = { + struct kunit_suite_set suite_set, filtered_set; + struct kunit_suite_set normal_suite_set = { mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites, }; + struct kunit_suite_set init_suite_set = { + mod->kunit_init_suites, mod->kunit_init_suites + mod->num_kunit_init_suites, + }; const char *action = kunit_action(); int err = 0; - suite_set = kunit_filter_suites(&suite_set, + if (mod->num_kunit_init_suites > 0) + suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set); + else + suite_set = normal_suite_set; + + filtered_set = kunit_filter_suites(&suite_set, kunit_filter_glob() ?: "*.*", kunit_filter(), kunit_filter_action(), &err); if (err) pr_err("kunit module: error filtering suites: %d\n", err); - mod->kunit_suites = (struct kunit_suite **)suite_set.start; - mod->num_kunit_suites = suite_set.end - suite_set.start; + mod->kunit_suites = (struct kunit_suite **)filtered_set.start; + mod->num_kunit_suites = filtered_set.end - filtered_set.start; + + if (mod->num_kunit_init_suites > 0) + kfree(suite_set.start); if (!action) - kunit_exec_run_tests(&suite_set, false); + kunit_exec_run_tests(&filtered_set, false); else if (!strcmp(action, "list")) - kunit_exec_list_tests(&suite_set, false); + kunit_exec_list_tests(&filtered_set, false); else if (!strcmp(action, "list_attr")) - kunit_exec_list_tests(&suite_set, true); + kunit_exec_list_tests(&filtered_set, true); else pr_err("kunit: unknown action '%s'\n", action); } @@ -737,12 +805,19 @@ static void kunit_module_exit(struct module *mod) }; const char *action = kunit_action(); + /* + * Check if the start address is a valid virtual address to detect + * if the module load sequence has failed and the suite set has not + * been initialized and filtered. 
+ */ + if (!suite_set.start || !virt_addr_valid(suite_set.start)) + return; + if (!action) __kunit_test_suites_exit(mod->kunit_suites, mod->num_kunit_suites); - if (suite_set.start) - kunit_free_suite_set(suite_set); + kunit_free_suite_set(suite_set); } static int kunit_module_notify(struct notifier_block *nb, unsigned long val, @@ -752,12 +827,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val, switch (val) { case MODULE_STATE_LIVE: + kunit_module_init(mod); break; case MODULE_STATE_GOING: kunit_module_exit(mod); break; case MODULE_STATE_COMING: - kunit_module_init(mod); break; case MODULE_STATE_UNFORMED: break; @@ -772,6 +847,8 @@ static struct notifier_block kunit_mod_nb = { }; #endif +KUNIT_DEFINE_ACTION_WRAPPER(kfree_action_wrapper, kfree, const void *) + void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp) { void *data; @@ -781,7 +858,7 @@ void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp) if (!data) return NULL; - if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0) + if (kunit_add_action_or_reset(test, kfree_action_wrapper, data) != 0) return NULL; return data; @@ -793,7 +870,7 @@ void kunit_kfree(struct kunit *test, const void *ptr) if (!ptr) return; - kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr); + kunit_release_action(test, kfree_action_wrapper, (void *)ptr); } EXPORT_SYMBOL_GPL(kunit_kfree); @@ -838,6 +915,8 @@ static int __init kunit_init(void) kunit_install_hooks(); kunit_debugfs_init(); + + kunit_bus_init(); #ifdef CONFIG_MODULES return register_module_notifier(&kunit_mod_nb); #else @@ -852,6 +931,9 @@ static void __exit kunit_exit(void) #ifdef CONFIG_MODULES unregister_module_notifier(&kunit_mod_nb); #endif + + kunit_bus_shutdown(); + kunit_debugfs_cleanup(); } module_exit(kunit_exit); diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index f7825991d576..6bbe0025b079 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -11,13 +11,14 @@ #include <linux/completion.h> #include <linux/kernel.h> #include <linux/kthread.h> +#include <linux/sched/task.h> #include "try-catch-impl.h" void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch) { try_catch->try_result = -EFAULT; - kthread_complete_and_exit(try_catch->try_completion, -EFAULT); + kthread_exit(0); } EXPORT_SYMBOL_GPL(kunit_try_catch_throw); @@ -25,9 +26,12 @@ static int kunit_generic_run_threadfn_adapter(void *data) { struct kunit_try_catch *try_catch = data; + try_catch->try_result = -EINTR; try_catch->try(try_catch->context); + if (try_catch->try_result == -EINTR) + try_catch->try_result = 0; - kthread_complete_and_exit(try_catch->try_completion, 0); + return 0; } static unsigned long kunit_test_timeout(void) @@ -57,30 +61,38 @@ static unsigned long kunit_test_timeout(void) void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) { - DECLARE_COMPLETION_ONSTACK(try_completion); struct kunit *test = try_catch->test; struct task_struct *task_struct; + struct completion *task_done; int exit_code, time_remaining; try_catch->context = context; - try_catch->try_completion = &try_completion; try_catch->try_result = 0; - task_struct = kthread_run(kunit_generic_run_threadfn_adapter, - try_catch, - "kunit_try_catch_thread"); + task_struct = kthread_create(kunit_generic_run_threadfn_adapter, + try_catch, "kunit_try_catch_thread"); if (IS_ERR(task_struct)) { + try_catch->try_result = PTR_ERR(task_struct); try_catch->catch(try_catch->context); return; } + 
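For reference, the KUNIT_DEFINE_ACTION_WRAPPER() conversions in the hunks above (kfree_wrapper, cleanup_raw_stream, kfree_action_wrapper) replace open-coded thunks whose only purpose was to avoid passing kfree() through an incompatible function-pointer cast, as the surrounding comments note. A minimal sketch of the pattern, assuming the macro expands to a typed wrapper roughly along these lines:

	/*
	 * Roughly equivalent to:
	 *   static void kfree_wrapper(void *in) { kfree((const void *)in); }
	 */
	KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

	/* ...which can then be registered without casting through kunit_action_t *: */
	kunit_add_action(test, kfree_wrapper, full_log);
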
get_task_struct(task_struct); + /* + * As for a vfork(2), task_struct->vfork_done (pointing to the + * underlying kthread->exited) can be used to wait for the end of a + * kernel thread. It is set to NULL when the thread exits, so we + * keep a copy here. + */ + task_done = task_struct->vfork_done; + wake_up_process(task_struct); - time_remaining = wait_for_completion_timeout(&try_completion, + time_remaining = wait_for_completion_timeout(task_done, kunit_test_timeout()); if (time_remaining == 0) { - kunit_err(test, "try timed out\n"); try_catch->try_result = -ETIMEDOUT; kthread_stop(task_struct); } + put_task_struct(task_struct); exit_code = try_catch->try_result; if (!exit_code) @@ -88,8 +100,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) if (exit_code == -EFAULT) try_catch->try_result = 0; - else if (exit_code == -EINTR) - kunit_err(test, "wake_up_process() was never called\n"); + else if (exit_code == -EINTR) { + if (test->last_seen.file) + kunit_err(test, "try faulted: last line seen %s:%d\n", + test->last_seen.file, test->last_seen.line); + else + kunit_err(test, "try faulted\n"); + } else if (exit_code == -ETIMEDOUT) + kunit_err(test, "try timed out\n"); else if (exit_code) kunit_err(test, "Unknown error: %d\n", exit_code); diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c index 859b67c4d697..27e0c8ee71d8 100644 --- a/lib/kunit_iov_iter.c +++ b/lib/kunit_iov_iter.c @@ -139,7 +139,7 @@ static void __init iov_kunit_copy_to_kvec(struct kunit *test) return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -194,7 +194,7 @@ stop: return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } struct bvec_test_range { @@ -302,7 +302,7 @@ static void __init iov_kunit_copy_to_bvec(struct kunit *test) return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -359,7 +359,7 @@ stop: return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } static void iov_kunit_destroy_xarray(void *data) @@ -453,7 +453,7 @@ static void __init iov_kunit_copy_to_xarray(struct kunit *test) return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -516,7 +516,7 @@ stop: return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -596,7 +596,7 @@ static void __init iov_kunit_extract_pages_kvec(struct kunit *test) stop: KUNIT_EXPECT_EQ(test, size, 0); KUNIT_EXPECT_EQ(test, iter.count, 0); - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -674,7 +674,7 @@ static void __init iov_kunit_extract_pages_bvec(struct kunit *test) stop: KUNIT_EXPECT_EQ(test, size, 0); KUNIT_EXPECT_EQ(test, iter.count, 0); - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -753,7 +753,7 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test) } stop: - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } static struct kunit_case __refdata iov_kunit_cases[] = { diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile deleted file mode 100644 index dcc912b3478f..000000000000 --- a/lib/livepatch/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for livepatch test code. 
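The reworked kunit_try_catch_run() above no longer uses an on-stack completion; per the comment in that hunk, it takes a reference on the kthread and waits on the completion that task_struct->vfork_done points at (the kthread's own exit completion), copying the pointer before the exiting thread clears it. A minimal sketch of that waiting pattern outside KUnit, with the function name and timeout invented for illustration:

	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/jiffies.h>
	#include <linux/kthread.h>
	#include <linux/sched/task.h>

	static int run_and_wait(int (*worker_fn)(void *), void *data)
	{
		struct task_struct *task;
		struct completion *exited;

		task = kthread_create(worker_fn, data, "example_worker");
		if (IS_ERR(task))
			return PTR_ERR(task);

		get_task_struct(task);		/* keep the task_struct valid across the wait */
		exited = task->vfork_done;	/* copy before the thread clears it on exit */
		wake_up_process(task);

		if (!wait_for_completion_timeout(exited, msecs_to_jiffies(5000)))
			kthread_stop(task);	/* thread did not finish in time */

		put_task_struct(task);
		return 0;
	}
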
- -obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \ - test_klp_callbacks_demo.o \ - test_klp_callbacks_demo2.o \ - test_klp_callbacks_busy.o \ - test_klp_callbacks_mod.o \ - test_klp_livepatch.o \ - test_klp_shadow_vars.o \ - test_klp_state.o \ - test_klp_state2.o \ - test_klp_state3.o diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c deleted file mode 100644 index 5af7093ca00c..000000000000 --- a/lib/livepatch/test_klp_atomic_replace.c +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -static int replace; -module_param(replace, int, 0644); -MODULE_PARM_DESC(replace, "replace (default=0)"); - -#include <linux/seq_file.h> -static int livepatch_meminfo_proc_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%s: %s\n", THIS_MODULE->name, - "this has been live patched"); - return 0; -} - -static struct klp_func funcs[] = { - { - .old_name = "meminfo_proc_show", - .new_func = livepatch_meminfo_proc_show, - }, {} -}; - -static struct klp_object objs[] = { - { - /* name being NULL means vmlinux */ - .funcs = funcs, - }, {} -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - /* set .replace in the init function below for demo purposes */ -}; - -static int test_klp_atomic_replace_init(void) -{ - patch.replace = replace; - return klp_enable_patch(&patch); -} - -static void test_klp_atomic_replace_exit(void) -{ -} - -module_init(test_klp_atomic_replace_init); -module_exit(test_klp_atomic_replace_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: atomic replace"); diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c deleted file mode 100644 index 133929e0ce8f..000000000000 --- a/lib/livepatch/test_klp_callbacks_busy.c +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/workqueue.h> -#include <linux/delay.h> - -/* load/run-time control from sysfs writer */ -static bool block_transition; -module_param(block_transition, bool, 0644); -MODULE_PARM_DESC(block_transition, "block_transition (default=false)"); - -static void busymod_work_func(struct work_struct *work); -static DECLARE_WORK(work, busymod_work_func); -static DECLARE_COMPLETION(busymod_work_started); - -static void busymod_work_func(struct work_struct *work) -{ - pr_info("%s enter\n", __func__); - complete(&busymod_work_started); - - while (READ_ONCE(block_transition)) { - /* - * Busy-wait until the sysfs writer has acknowledged a - * blocked transition and clears the flag. - */ - msleep(20); - } - - pr_info("%s exit\n", __func__); -} - -static int test_klp_callbacks_busy_init(void) -{ - pr_info("%s\n", __func__); - schedule_work(&work); - - /* - * To synchronize kernel messages, hold the init function from - * exiting until the work function's entry message has printed. - */ - wait_for_completion(&busymod_work_started); - - if (!block_transition) { - /* - * Serialize output: print all messages from the work - * function before returning from init(). 
- */ - flush_work(&work); - } - - return 0; -} - -static void test_klp_callbacks_busy_exit(void) -{ - WRITE_ONCE(block_transition, false); - flush_work(&work); - pr_info("%s\n", __func__); -} - -module_init(test_klp_callbacks_busy_init); -module_exit(test_klp_callbacks_busy_exit); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: busy target module"); diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c deleted file mode 100644 index 3fd8fe1cd1cc..000000000000 --- a/lib/livepatch/test_klp_callbacks_demo.c +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -static int pre_patch_ret; -module_param(pre_patch_ret, int, 0644); -MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)"); - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return pre_patch_ret; -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -static void patched_work_func(struct work_struct *work) -{ - pr_info("%s\n", __func__); -} - -static struct klp_func no_funcs[] = { - {} -}; - -static struct klp_func busymod_funcs[] = { - { - .old_name = "busymod_work_func", - .new_func = patched_work_func, - }, {} -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { - .name = "test_klp_callbacks_mod", - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { - .name = "test_klp_callbacks_busy", - .funcs = busymod_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, -}; - -static int test_klp_callbacks_demo_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo_exit(void) -{ -} - -module_init(test_klp_callbacks_demo_init); 
-module_exit(test_klp_callbacks_demo_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: livepatch demo"); diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c deleted file mode 100644 index 5417573e80af..000000000000 --- a/lib/livepatch/test_klp_callbacks_demo2.c +++ /dev/null @@ -1,93 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -static int replace; -module_param(replace, int, 0644); -MODULE_PARM_DESC(replace, "replace (default=0)"); - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return 0; -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -static struct klp_func no_funcs[] = { - { } -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - /* set .replace in the init function below for demo purposes */ -}; - -static int test_klp_callbacks_demo2_init(void) -{ - patch.replace = replace; - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo2_exit(void) -{ -} - -module_init(test_klp_callbacks_demo2_init); -module_exit(test_klp_callbacks_demo2_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: livepatch demo2"); diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c deleted file mode 100644 index 8fbe645b1c2c..000000000000 --- a/lib/livepatch/test_klp_callbacks_mod.c +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> - -static int test_klp_callbacks_mod_init(void) -{ - pr_info("%s\n", __func__); - return 0; -} - -static void test_klp_callbacks_mod_exit(void) -{ - pr_info("%s\n", __func__); -} - -module_init(test_klp_callbacks_mod_init); 
-module_exit(test_klp_callbacks_mod_exit); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: target module"); diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c deleted file mode 100644 index aff08199de71..000000000000 --- a/lib/livepatch/test_klp_livepatch.c +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -#include <linux/seq_file.h> -static int livepatch_cmdline_proc_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%s: %s\n", THIS_MODULE->name, - "this has been live patched"); - return 0; -} - -static struct klp_func funcs[] = { - { - .old_name = "cmdline_proc_show", - .new_func = livepatch_cmdline_proc_show, - }, { } -}; - -static struct klp_object objs[] = { - { - /* name being NULL means vmlinux */ - .funcs = funcs, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, -}; - -static int test_klp_livepatch_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_livepatch_exit(void) -{ -} - -module_init(test_klp_livepatch_init); -module_exit(test_klp_livepatch_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: livepatch module"); diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c deleted file mode 100644 index b99116490858..000000000000 --- a/lib/livepatch/test_klp_shadow_vars.c +++ /dev/null @@ -1,301 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/livepatch.h> -#include <linux/slab.h> - -/* - * Keep a small list of pointers so that we can print address-agnostic - * pointer values. Use a rolling integer count to differentiate the values. - * Ironically we could have used the shadow variable API to do this, but - * let's not lean too heavily on the very code we're testing. - */ -static LIST_HEAD(ptr_list); -struct shadow_ptr { - void *ptr; - int id; - struct list_head list; -}; - -static void free_ptr_list(void) -{ - struct shadow_ptr *sp, *tmp_sp; - - list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) { - list_del(&sp->list); - kfree(sp); - } -} - -static int ptr_id(void *ptr) -{ - struct shadow_ptr *sp; - static int count; - - list_for_each_entry(sp, &ptr_list, list) { - if (sp->ptr == ptr) - return sp->id; - } - - sp = kmalloc(sizeof(*sp), GFP_ATOMIC); - if (!sp) - return -ENOMEM; - sp->ptr = ptr; - sp->id = count++; - - list_add(&sp->list, &ptr_list); - - return sp->id; -} - -/* - * Shadow variable wrapper functions that echo the function and arguments - * to the kernel log for testing verification. Don't display raw pointers, - * but use the ptr_id() value instead. 
- */ -static void *shadow_get(void *obj, unsigned long id) -{ - int **sv; - - sv = klp_shadow_get(obj, id); - pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n", - __func__, ptr_id(obj), id, ptr_id(sv)); - - return sv; -} - -static void *shadow_alloc(void *obj, unsigned long id, size_t size, - gfp_t gfp_flags, klp_shadow_ctor_t ctor, - void *ctor_data) -{ - int **var = ctor_data; - int **sv; - - sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var); - pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n", - __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), - ptr_id(*var), ptr_id(sv)); - - return sv; -} - -static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size, - gfp_t gfp_flags, klp_shadow_ctor_t ctor, - void *ctor_data) -{ - int **var = ctor_data; - int **sv; - - sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var); - pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n", - __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), - ptr_id(*var), ptr_id(sv)); - - return sv; -} - -static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) -{ - klp_shadow_free(obj, id, dtor); - pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n", - __func__, ptr_id(obj), id, ptr_id(dtor)); -} - -static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) -{ - klp_shadow_free_all(id, dtor); - pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor)); -} - - -/* Shadow variable constructor - remember simple pointer data */ -static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data) -{ - int **sv = shadow_data; - int **var = ctor_data; - - if (!var) - return -EINVAL; - - *sv = *var; - pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var)); - - return 0; -} - -/* - * With more than one item to free in the list, order is not determined and - * shadow_dtor will not be passed to shadow_free_all() which would make the - * test fail. (see pass 6) - */ -static void shadow_dtor(void *obj, void *shadow_data) -{ - int **sv = shadow_data; - - pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n", - __func__, ptr_id(obj), ptr_id(sv)); -} - -/* number of objects we simulate that need shadow vars */ -#define NUM_OBJS 3 - -/* dynamically created obj fields have the following shadow var id values */ -#define SV_ID1 0x1234 -#define SV_ID2 0x1235 - -/* - * The main test case adds/removes new fields (shadow var) to each of these - * test structure instances. The last group of fields in the struct represent - * the idea that shadow variables may be added and removed to and from the - * struct during execution. - */ -struct test_object { - /* add anything here below and avoid to define an empty struct */ - struct shadow_ptr sp; - - /* these represent shadow vars added and removed with SV_ID{1,2} */ - /* char nfield1; */ - /* int nfield2; */ -}; - -static int test_klp_shadow_vars_init(void) -{ - struct test_object objs[NUM_OBJS]; - char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS]; - char *pndup[NUM_OBJS]; - int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS]; - void **sv; - int ret; - int i; - - ptr_id(NULL); - - /* - * With an empty shadow variable hash table, expect not to find - * any matches. 
- */ - sv = shadow_get(&objs[0], SV_ID1); - if (!sv) - pr_info(" got expected NULL result\n"); - - /* pass 1: init & alloc a char+int pair of svars for each objs */ - for (i = 0; i < NUM_OBJS; i++) { - pnfields1[i] = &nfields1[i]; - ptr_id(pnfields1[i]); - - if (i % 2) { - sv1[i] = shadow_alloc(&objs[i], SV_ID1, - sizeof(pnfields1[i]), GFP_KERNEL, - shadow_ctor, &pnfields1[i]); - } else { - sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1, - sizeof(pnfields1[i]), GFP_KERNEL, - shadow_ctor, &pnfields1[i]); - } - if (!sv1[i]) { - ret = -ENOMEM; - goto out; - } - - pnfields2[i] = &nfields2[i]; - ptr_id(pnfields2[i]); - sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]), - GFP_KERNEL, shadow_ctor, &pnfields2[i]); - if (!sv2[i]) { - ret = -ENOMEM; - goto out; - } - } - - /* pass 2: verify we find allocated svars and where they point to */ - for (i = 0; i < NUM_OBJS; i++) { - /* check the "char" svar for all objects */ - sv = shadow_get(&objs[i], SV_ID1); - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv1[i]), ptr_id(*sv1[i])); - - /* check the "int" svar for all objects */ - sv = shadow_get(&objs[i], SV_ID2); - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv2[i]), ptr_id(*sv2[i])); - } - - /* pass 3: verify that 'get_or_alloc' returns already allocated svars */ - for (i = 0; i < NUM_OBJS; i++) { - pndup[i] = &nfields1[i]; - ptr_id(pndup[i]); - - sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]), - GFP_KERNEL, shadow_ctor, &pndup[i]); - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv1[i]), ptr_id(*sv1[i])); - } - - /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */ - for (i = 0; i < NUM_OBJS; i++) { - shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */ - sv = shadow_get(&objs[i], SV_ID1); - if (!sv) - pr_info(" got expected NULL result\n"); - } - - /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */ - for (i = 0; i < NUM_OBJS; i++) { - sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */ - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv2[i]), ptr_id(*sv2[i])); - } - - /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. 
*/ - shadow_free_all(SV_ID2, NULL); /* 'int' pairs */ - for (i = 0; i < NUM_OBJS; i++) { - sv = shadow_get(&objs[i], SV_ID2); - if (!sv) - pr_info(" got expected NULL result\n"); - } - - free_ptr_list(); - - return 0; -out: - shadow_free_all(SV_ID1, NULL); /* 'char' pairs */ - shadow_free_all(SV_ID2, NULL); /* 'int' pairs */ - free_ptr_list(); - - return ret; -} - -static void test_klp_shadow_vars_exit(void) -{ -} - -module_init(test_klp_shadow_vars_init); -module_exit(test_klp_shadow_vars_exit); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: shadow variables"); diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c deleted file mode 100644 index 57a4253acb01..000000000000 --- a/lib/livepatch/test_klp_state.c +++ /dev/null @@ -1,162 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2019 SUSE - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/printk.h> -#include <linux/livepatch.h> - -#define CONSOLE_LOGLEVEL_STATE 1 -/* Version 1 does not support migration. */ -#define CONSOLE_LOGLEVEL_STATE_VERSION 1 - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -static struct klp_patch patch; - -static int allocate_loglevel_state(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return -EINVAL; - - loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL); - if (!loglevel_state->data) - return -ENOMEM; - - pr_info("%s: allocating space to store console_loglevel\n", - __func__); - return 0; -} - -static void fix_console_loglevel(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: fixing console_loglevel\n", __func__); - *(int *)loglevel_state->data = console_loglevel; - console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; -} - -static void restore_console_loglevel(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: restoring console_loglevel\n", __func__); - console_loglevel = *(int *)loglevel_state->data; -} - -static void free_loglevel_state(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: freeing space for the stored console_loglevel\n", - __func__); - kfree(loglevel_state->data); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return allocate_loglevel_state(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - fix_console_loglevel(); -} - -/* Executed on object 
unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - restore_console_loglevel(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - free_loglevel_state(); -} - -static struct klp_func no_funcs[] = { - {} -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_state states[] = { - { - .id = CONSOLE_LOGLEVEL_STATE, - .version = CONSOLE_LOGLEVEL_STATE_VERSION, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - .states = states, - .replace = true, -}; - -static int test_klp_callbacks_demo_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo_exit(void) -{ -} - -module_init(test_klp_callbacks_demo_init); -module_exit(test_klp_callbacks_demo_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>"); -MODULE_DESCRIPTION("Livepatch test: system state modification"); diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c deleted file mode 100644 index c978ea4d5e67..000000000000 --- a/lib/livepatch/test_klp_state2.c +++ /dev/null @@ -1,191 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2019 SUSE - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/printk.h> -#include <linux/livepatch.h> - -#define CONSOLE_LOGLEVEL_STATE 1 -/* Version 2 supports migration. 
*/ -#define CONSOLE_LOGLEVEL_STATE_VERSION 2 - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -static struct klp_patch patch; - -static int allocate_loglevel_state(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: space to store console_loglevel already allocated\n", - __func__); - return 0; - } - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return -EINVAL; - - loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL); - if (!loglevel_state->data) - return -ENOMEM; - - pr_info("%s: allocating space to store console_loglevel\n", - __func__); - return 0; -} - -static void fix_console_loglevel(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: taking over the console_loglevel change\n", - __func__); - loglevel_state->data = prev_loglevel_state->data; - return; - } - - pr_info("%s: fixing console_loglevel\n", __func__); - *(int *)loglevel_state->data = console_loglevel; - console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; -} - -static void restore_console_loglevel(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: passing the console_loglevel change back to the old livepatch\n", - __func__); - return; - } - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: restoring console_loglevel\n", __func__); - console_loglevel = *(int *)loglevel_state->data; -} - -static void free_loglevel_state(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: keeping space to store console_loglevel\n", - __func__); - return; - } - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: freeing space for the stored console_loglevel\n", - __func__); - kfree(loglevel_state->data); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return allocate_loglevel_state(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - fix_console_loglevel(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - restore_console_loglevel(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void 
post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - free_loglevel_state(); -} - -static struct klp_func no_funcs[] = { - {} -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_state states[] = { - { - .id = CONSOLE_LOGLEVEL_STATE, - .version = CONSOLE_LOGLEVEL_STATE_VERSION, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - .states = states, - .replace = true, -}; - -static int test_klp_callbacks_demo_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo_exit(void) -{ -} - -module_init(test_klp_callbacks_demo_init); -module_exit(test_klp_callbacks_demo_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>"); -MODULE_DESCRIPTION("Livepatch test: system state modification"); diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c deleted file mode 100644 index 9226579d10c5..000000000000 --- a/lib/livepatch/test_klp_state3.c +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2019 SUSE - -/* The console loglevel fix is the same in the next cumulative patch. */ -#include "test_klp_state2.c" diff --git a/lib/maple_tree.c b/lib/maple_tree.c index bb24d84a4922..2d7d27e6ae3c 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4,6 +4,8 @@ * Copyright (c) 2018-2022 Oracle Corporation * Authors: Liam R. Howlett <Liam.Howlett@oracle.com> * Matthew Wilcox <willy@infradead.org> + * Copyright (c) 2023 ByteDance + * Author: Peng Zhang <zhangpeng.00@bytedance.com> */ /* @@ -14,8 +16,8 @@ * and are simply the slot index + the minimum of the node. * * In regular B-Tree terms, pivots are called keys. The term pivot is used to - * indicate that the tree is specifying ranges, Pivots may appear in the - * subtree with an entry attached to the value where as keys are unique to a + * indicate that the tree is specifying ranges. Pivots may appear in the + * subtree with an entry attached to the value whereas keys are unique to a * specific position of a B-tree. Pivot values are inclusive of the slot with * the same index. 
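To make the pivot/slot relationship described above concrete, consider a hypothetical leaf node (all numbers invented) covering the range [0, 100] with pivots {10, 20, 35}:

	slot 0 holds the entry for the range [0, 10]
	slot 1 holds the entry for the range [11, 20]
	slot 2 holds the entry for the range [21, 35]
	slot 3 holds the entry for the range [36, 100]   (upper bound taken from the node's max)

That is, slot i spans pivot[i - 1] + 1 through pivot[i], with the node's min and max supplying the outer bounds, and each pivot is inclusive of the range stored in its own slot.
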
* @@ -165,6 +167,11 @@ static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes) return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes); } +static inline void mt_free_one(struct maple_node *node) +{ + kmem_cache_free(maple_node_cache, node); +} + static inline void mt_free_bulk(size_t size, void __rcu **nodes) { kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes); @@ -205,23 +212,29 @@ static unsigned int mas_mt_height(struct ma_state *mas) return mt_height(mas->tree); } -static inline enum maple_type mte_node_type(const struct maple_enode *entry) +static inline unsigned int mt_attr(struct maple_tree *mt) +{ + return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK; +} + +static __always_inline enum maple_type mte_node_type( + const struct maple_enode *entry) { return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) & MAPLE_NODE_TYPE_MASK; } -static inline bool ma_is_dense(const enum maple_type type) +static __always_inline bool ma_is_dense(const enum maple_type type) { return type < maple_leaf_64; } -static inline bool ma_is_leaf(const enum maple_type type) +static __always_inline bool ma_is_leaf(const enum maple_type type) { return type < maple_range_64; } -static inline bool mte_is_leaf(const struct maple_enode *entry) +static __always_inline bool mte_is_leaf(const struct maple_enode *entry) { return ma_is_leaf(mte_node_type(entry)); } @@ -230,60 +243,50 @@ static inline bool mte_is_leaf(const struct maple_enode *entry) * We also reserve values with the bottom two bits set to '10' which are * below 4096 */ -static inline bool mt_is_reserved(const void *entry) +static __always_inline bool mt_is_reserved(const void *entry) { return ((unsigned long)entry < MAPLE_RESERVED_RANGE) && xa_is_internal(entry); } -static inline void mas_set_err(struct ma_state *mas, long err) +static __always_inline void mas_set_err(struct ma_state *mas, long err) { mas->node = MA_ERROR(err); + mas->status = ma_error; } -static inline bool mas_is_ptr(const struct ma_state *mas) +static __always_inline bool mas_is_ptr(const struct ma_state *mas) { - return mas->node == MAS_ROOT; + return mas->status == ma_root; } -static inline bool mas_is_start(const struct ma_state *mas) +static __always_inline bool mas_is_start(const struct ma_state *mas) { - return mas->node == MAS_START; + return mas->status == ma_start; } -bool mas_is_err(struct ma_state *mas) +static __always_inline bool mas_is_none(const struct ma_state *mas) { - return xa_is_err(mas->node); + return mas->status == ma_none; } -static __always_inline bool mas_is_overflow(struct ma_state *mas) +static __always_inline bool mas_is_paused(const struct ma_state *mas) { - if (unlikely(mas->node == MAS_OVERFLOW)) - return true; - - return false; + return mas->status == ma_pause; } -static __always_inline bool mas_is_underflow(struct ma_state *mas) +static __always_inline bool mas_is_overflow(struct ma_state *mas) { - if (unlikely(mas->node == MAS_UNDERFLOW)) - return true; - - return false; + return mas->status == ma_overflow; } -static inline bool mas_searchable(struct ma_state *mas) +static inline bool mas_is_underflow(struct ma_state *mas) { - if (mas_is_none(mas)) - return false; - - if (mas_is_ptr(mas)) - return false; - - return true; + return mas->status == ma_underflow; } -static inline struct maple_node *mte_to_node(const struct maple_enode *entry) +static __always_inline struct maple_node *mte_to_node( + const struct maple_enode *entry) { return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK); } @@ -360,12 +363,12 @@ 
static inline bool mte_has_null(const struct maple_enode *node) return (unsigned long)node & MAPLE_ENODE_NULL; } -static inline bool ma_is_root(struct maple_node *node) +static __always_inline bool ma_is_root(struct maple_node *node) { return ((unsigned long)node->parent & MA_ROOT_PARENT); } -static inline bool mte_is_root(const struct maple_enode *node) +static __always_inline bool mte_is_root(const struct maple_enode *node) { return ma_is_root(mte_to_node(node)); } @@ -375,7 +378,7 @@ static inline bool mas_is_root_limits(const struct ma_state *mas) return !mas->min && mas->max == ULONG_MAX; } -static inline bool mt_is_alloc(struct maple_tree *mt) +static __always_inline bool mt_is_alloc(struct maple_tree *mt) { return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE); } @@ -514,11 +517,12 @@ void mas_set_parent(struct ma_state *mas, struct maple_enode *enode, * * Return: The slot in the parent node where @enode resides. */ -static inline unsigned int mte_parent_slot(const struct maple_enode *enode) +static __always_inline +unsigned int mte_parent_slot(const struct maple_enode *enode) { unsigned long val = (unsigned long)mte_to_node(enode)->parent; - if (val & MA_ROOT_PARENT) + if (unlikely(val & MA_ROOT_PARENT)) return 0; /* @@ -534,7 +538,8 @@ static inline unsigned int mte_parent_slot(const struct maple_enode *enode) * * Return: The parent maple node. */ -static inline struct maple_node *mte_parent(const struct maple_enode *enode) +static __always_inline +struct maple_node *mte_parent(const struct maple_enode *enode) { return (void *)((unsigned long) (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK); @@ -546,7 +551,7 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode) * * Return: true if dead, false otherwise. */ -static inline bool ma_dead_node(const struct maple_node *node) +static __always_inline bool ma_dead_node(const struct maple_node *node) { struct maple_node *parent; @@ -562,7 +567,7 @@ static inline bool ma_dead_node(const struct maple_node *node) * * Return: true if dead, false otherwise. */ -static inline bool mte_dead_node(const struct maple_enode *enode) +static __always_inline bool mte_dead_node(const struct maple_enode *enode) { struct maple_node *parent, *node; @@ -680,35 +685,6 @@ static inline unsigned long *ma_gaps(struct maple_node *node, } /* - * mas_pivot() - Get the pivot at @piv of the maple encoded node. - * @mas: The maple state. - * @piv: The pivot. - * - * Return: the pivot at @piv of @mn. - */ -static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv) -{ - struct maple_node *node = mas_mn(mas); - enum maple_type type = mte_node_type(mas->node); - - if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) { - mas_set_err(mas, -EIO); - return 0; - } - - switch (type) { - case maple_arange_64: - return node->ma64.pivot[piv]; - case maple_range_64: - case maple_leaf_64: - return node->mr64.pivot[piv]; - case maple_dense: - return 0; - } - return 0; -} - -/* * mas_safe_pivot() - get the pivot at @piv or mas->max. * @mas: The maple state * @pivots: The pointer to the maple node pivots @@ -718,7 +694,7 @@ static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv) * Return: The pivot at @piv within the limit of the @pivots array, @mas->max * otherwise. 
*/ -static inline unsigned long +static __always_inline unsigned long mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots, unsigned char piv, enum maple_type type) { @@ -759,7 +735,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv, BUG_ON(piv >= mt_pivots[type]); switch (type) { - default: case maple_range_64: case maple_leaf_64: node->mr64.pivot[piv] = val; @@ -783,7 +758,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv, static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt) { switch (mt) { - default: case maple_arange_64: return mn->ma64.slot; case maple_range_64: @@ -792,6 +766,8 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt) case maple_dense: return mn->slot; } + + return NULL; } static inline bool mt_write_locked(const struct maple_tree *mt) @@ -800,20 +776,20 @@ static inline bool mt_write_locked(const struct maple_tree *mt) lockdep_is_held(&mt->ma_lock); } -static inline bool mt_locked(const struct maple_tree *mt) +static __always_inline bool mt_locked(const struct maple_tree *mt) { return mt_external_lock(mt) ? mt_lock_is_held(mt) : lockdep_is_held(&mt->ma_lock); } -static inline void *mt_slot(const struct maple_tree *mt, +static __always_inline void *mt_slot(const struct maple_tree *mt, void __rcu **slots, unsigned char offset) { return rcu_dereference_check(slots[offset], mt_locked(mt)); } -static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots, - unsigned char offset) +static __always_inline void *mt_slot_locked(struct maple_tree *mt, + void __rcu **slots, unsigned char offset) { return rcu_dereference_protected(slots[offset], mt_write_locked(mt)); } @@ -825,8 +801,8 @@ static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots, * * Return: The entry stored in @slots at the @offset. 
*/ -static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots, - unsigned char offset) +static __always_inline void *mas_slot_locked(struct ma_state *mas, + void __rcu **slots, unsigned char offset) { return mt_slot_locked(mas->tree, slots, offset); } @@ -839,8 +815,8 @@ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots, * * Return: The entry stored in @slots at the @offset */ -static inline void *mas_slot(struct ma_state *mas, void __rcu **slots, - unsigned char offset) +static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots, + unsigned char offset) { return mt_slot(mas->tree, slots, offset); } @@ -851,7 +827,7 @@ static inline void *mas_slot(struct ma_state *mas, void __rcu **slots, * * Return: The pointer to the root of the tree */ -static inline void *mas_root(struct ma_state *mas) +static __always_inline void *mas_root(struct ma_state *mas) { return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree)); } @@ -954,10 +930,8 @@ static inline unsigned char ma_meta_end(struct maple_node *mn, /* * ma_meta_gap() - Get the largest gap location of a node from the metadata * @mn: The maple node - * @mt: The maple node type */ -static inline unsigned char ma_meta_gap(struct maple_node *mn, - enum maple_type mt) +static inline unsigned char ma_meta_gap(struct maple_node *mn) { return mn->ma64.meta.gap; } @@ -1112,14 +1086,16 @@ static int mas_ascend(struct ma_state *mas) return 0; } - if (!mas->min) + min = 0; + max = ULONG_MAX; + if (!mas->offset) { + min = mas->min; set_min = true; + } if (mas->max == ULONG_MAX) set_max = true; - min = 0; - max = ULONG_MAX; do { p_enode = a_enode; a_type = mas_parent_type(mas, p_enode); @@ -1258,6 +1234,7 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) if (mas->mas_flags & MA_STATE_PREALLOC) { if (allocated) return; + BUG_ON(!allocated); WARN_ON(!allocated); } @@ -1330,8 +1307,8 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used) } /* - * mas_node_count() - Check if enough nodes are allocated and request more if - * there is not enough nodes. + * mas_node_count_gfp() - Check if enough nodes are allocated and request more + * if there is not enough nodes. * @mas: The maple state * @count: The number of nodes needed * @gfp: the gfp flags @@ -1363,14 +1340,14 @@ static void mas_node_count(struct ma_state *mas, int count) * mas_start() - Sets up maple state for operations. * @mas: The maple state. * - * If mas->node == MAS_START, then set the min, max and depth to + * If mas->status == mas_start, then set the min, max and depth to * defaults. * * Return: - * - If mas->node is an error or not MAS_START, return NULL. - * - If it's an empty tree: NULL & mas->node == MAS_NONE - * - If it's a single entry: The entry & mas->node == MAS_ROOT - * - If it's a tree: NULL & mas->node == safe root node. + * - If mas->node is an error or not mas_start, return NULL. + * - If it's an empty tree: NULL & mas->status == ma_none + * - If it's a single entry: The entry & mas->status == mas_root + * - If it's a tree: NULL & mas->status == safe root node. 
*/ static inline struct maple_enode *mas_start(struct ma_state *mas) { @@ -1386,6 +1363,7 @@ retry: /* Tree with nodes */ if (likely(xa_is_node(root))) { mas->depth = 1; + mas->status = ma_active; mas->node = mte_safe_root(root); mas->offset = 0; if (mte_dead_node(mas->node)) @@ -1396,13 +1374,14 @@ retry: /* empty tree */ if (unlikely(!root)) { - mas->node = MAS_NONE; + mas->node = NULL; + mas->status = ma_none; mas->offset = MAPLE_NODE_SLOTS; return NULL; } /* Single entry tree */ - mas->node = MAS_ROOT; + mas->status = ma_root; mas->offset = MAPLE_NODE_SLOTS; /* Single entry tree. */ @@ -1425,10 +1404,8 @@ retry: * Uses metadata to find the end of the data when possible. * Return: The zero indexed last slot with data (may be null). */ -static inline unsigned char ma_data_end(struct maple_node *node, - enum maple_type type, - unsigned long *pivots, - unsigned long max) +static __always_inline unsigned char ma_data_end(struct maple_node *node, + enum maple_type type, unsigned long *pivots, unsigned long max) { unsigned char offset; @@ -1541,6 +1518,9 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas) gap = ULONG_MAX - pivots[max_piv]; if (gap > max_gap) max_gap = gap; + + if (max_gap > pivots[max_piv] - mas->min) + return max_gap; } for (; i <= max_piv; i++) { @@ -1608,7 +1588,7 @@ static inline unsigned long mas_max_gap(struct ma_state *mas) node = mas_mn(mas); MAS_BUG_ON(mas, mt != maple_arange_64); - offset = ma_meta_gap(node, mt); + offset = ma_meta_gap(node); gaps = ma_gaps(node, mt); return gaps[offset]; } @@ -1639,7 +1619,7 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset, ascend: MAS_BUG_ON(mas, pmt != maple_arange_64); - meta_offset = ma_meta_gap(pnode, pmt); + meta_offset = ma_meta_gap(pnode); meta_gap = pgaps[meta_offset]; pgaps[offset] = new; @@ -1987,27 +1967,13 @@ complete: /* * mas_leaf_set_meta() - Set the metadata of a leaf if possible. - * @mas: The maple state * @node: The maple node - * @pivots: pointer to the maple node pivots * @mt: The maple type - * @end: The assumed end - * - * Note, end may be incremented within this function but not modified at the - * source. This is fine since the metadata is the last thing to be stored in a - * node during a write. + * @end: The node end */ -static inline void mas_leaf_set_meta(struct ma_state *mas, - struct maple_node *node, unsigned long *pivots, +static inline void mas_leaf_set_meta(struct maple_node *node, enum maple_type mt, unsigned char end) { - /* There is no room for metadata already */ - if (mt_pivots[mt] <= end) - return; - - if (pivots[end] && pivots[end] < mas->max) - end++; - if (end < mt_slots[mt] - 1) ma_set_meta(node, mt, 0, end); } @@ -2064,7 +2030,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node, ma_set_meta(node, mt, offset, end); } else { - mas_leaf_set_meta(mas, node, pivots, mt, end); + mas_leaf_set_meta(node, mt, end); } } @@ -2152,11 +2118,11 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, } slot = offset_end + 1; - if (slot > wr_mas->node_end) + if (slot > mas->end) goto b_end; /* Copy end data to the end of the node. */ - mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end); + mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end); b_node->b_end--; return; @@ -2211,19 +2177,21 @@ static inline bool mas_next_sibling(struct ma_state *mas) } /* - * mte_node_or_node() - Return the encoded node or MAS_NONE. + * mte_node_or_none() - Set the enode and state. * @enode: The encoded maple node. 
* - * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state. - * - * Return: @enode or MAS_NONE + * Set the node to the enode and the status. */ -static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) +static inline void mas_node_or_none(struct ma_state *mas, + struct maple_enode *enode) { - if (enode) - return enode; - - return ma_enode_ptr(MAS_NONE); + if (enode) { + mas->node = enode; + mas->status = ma_active; + } else { + mas->node = NULL; + mas->status = ma_none; + } } /* @@ -2245,8 +2213,8 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) wr_mas->node = mas_mn(wr_mas->mas); wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); - count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, - wr_mas->pivots, mas->max); + count = mas->end = ma_data_end(wr_mas->node, wr_mas->type, + wr_mas->pivots, mas->max); offset = mas->offset; while (offset < count && mas->index > wr_mas->pivots[offset]) @@ -2303,8 +2271,6 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast) struct ma_state l_tmp = *mast->orig_l; unsigned char depth = 0; - r_tmp = *mast->orig_r; - l_tmp = *mast->orig_l; do { mas_ascend(mast->orig_r); mas_ascend(mast->orig_l); @@ -2535,7 +2501,7 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast, } /* - * mas_topiary_node() - Dispose of a singe node + * mas_topiary_node() - Dispose of a single node * @mas: The maple state for pushing nodes * @enode: The encoded maple node * @in_rcu: If the tree is in rcu mode @@ -2543,13 +2509,15 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast, * The node will either be RCU freed or pushed back on the maple state. */ static inline void mas_topiary_node(struct ma_state *mas, - struct maple_enode *enode, bool in_rcu) + struct ma_state *tmp_mas, bool in_rcu) { struct maple_node *tmp; + struct maple_enode *enode; - if (enode == MAS_NONE) + if (mas_is_none(tmp_mas)) return; + enode = tmp_mas->node; tmp = mte_to_node(enode); mte_set_node_dead(enode); if (in_rcu) @@ -2589,8 +2557,8 @@ static inline void mas_topiary_replace(struct ma_state *mas, /* Update the parent pointers in the tree */ tmp[0] = *mas; tmp[0].offset = 0; - tmp[1].node = MAS_NONE; - tmp[2].node = MAS_NONE; + tmp[1].status = ma_none; + tmp[2].status = ma_none; while (!mte_is_leaf(tmp[0].node)) { n = 0; for (i = 0; i < 3; i++) { @@ -2610,7 +2578,7 @@ static inline void mas_topiary_replace(struct ma_state *mas, break; while (n < 3) - tmp_next[n++].node = MAS_NONE; + tmp_next[n++].status = ma_none; for (i = 0; i < 3; i++) tmp[i] = tmp_next[i]; @@ -2623,8 +2591,8 @@ static inline void mas_topiary_replace(struct ma_state *mas, tmp[0] = *mas; tmp[0].offset = 0; tmp[0].node = old_enode; - tmp[1].node = MAS_NONE; - tmp[2].node = MAS_NONE; + tmp[1].status = ma_none; + tmp[2].status = ma_none; in_rcu = mt_in_rcu(mas->tree); do { n = 0; @@ -2639,7 +2607,7 @@ static inline void mas_topiary_replace(struct ma_state *mas, if ((tmp_next[n].min >= tmp_next->index) && (tmp_next[n].max <= tmp_next->last)) { mat_add(&subtrees, tmp_next[n].node); - tmp_next[n].node = MAS_NONE; + tmp_next[n].status = ma_none; } else { n++; } @@ -2650,16 +2618,16 @@ static inline void mas_topiary_replace(struct ma_state *mas, break; while (n < 3) - tmp_next[n++].node = MAS_NONE; + tmp_next[n++].status = ma_none; for (i = 0; i < 3; i++) { - mas_topiary_node(mas, tmp[i].node, in_rcu); + mas_topiary_node(mas, &tmp[i], in_rcu); tmp[i] = tmp_next[i]; } } while (!mte_is_leaf(tmp[0].node)); for (i = 0; i < 
3; i++) - mas_topiary_node(mas, tmp[i].node, in_rcu); + mas_topiary_node(mas, &tmp[i], in_rcu); mas_mat_destroy(mas, &subtrees); } @@ -2698,9 +2666,9 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, { bool new_lmax = true; - mast->l->node = mte_node_or_none(left); - mast->m->node = mte_node_or_none(middle); - mast->r->node = mte_node_or_none(right); + mas_node_or_none(mast->l, left); + mas_node_or_none(mast->m, middle); + mas_node_or_none(mast->r, right); mast->l->min = mast->orig_l->min; if (split == mast->bn->b_end) { @@ -2796,32 +2764,29 @@ static inline void *mtree_range_walk(struct ma_state *mas) min = mas->min; max = mas->max; do { - offset = 0; last = next; node = mte_to_node(next); type = mte_node_type(next); pivots = ma_pivots(node, type); end = ma_data_end(node, type, pivots, max); - if (unlikely(ma_dead_node(node))) - goto dead_node; - - if (pivots[offset] >= mas->index) { - prev_max = max; - prev_min = min; - max = pivots[offset]; + prev_min = min; + prev_max = max; + if (pivots[0] >= mas->index) { + offset = 0; + max = pivots[0]; goto next; } - do { + offset = 1; + while (offset < end) { + if (pivots[offset] >= mas->index) { + max = pivots[offset]; + break; + } offset++; - } while ((offset < end) && (pivots[offset] < mas->index)); + } - prev_min = min; min = pivots[offset - 1] + 1; - prev_max = max; - if (likely(offset < end && pivots[offset])) - max = pivots[offset]; - next: slots = ma_slots(node, type); next = mt_slot(mas->tree, slots, offset); @@ -2829,6 +2794,7 @@ next: goto dead_node; } while (!ma_is_leaf(type)); + mas->end = end; mas->offset = offset; mas->index = min; mas->last = max; @@ -2879,7 +2845,7 @@ static int mas_spanning_rebalance(struct ma_state *mas, mast->l = &l_mas; mast->m = &m_mas; mast->r = &r_mas; - l_mas.node = r_mas.node = m_mas.node = MAS_NONE; + l_mas.status = r_mas.status = m_mas.status = ma_none; /* Check if this is not root and has sufficient data. */ if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) && @@ -3167,7 +3133,7 @@ done: * @mas: The maple state * @height: The height of the tree in case it's a new root. */ -static inline bool mas_split_final_node(struct maple_subtree_state *mast, +static inline void mas_split_final_node(struct maple_subtree_state *mast, struct ma_state *mas, int height) { struct maple_enode *ancestor; @@ -3191,7 +3157,6 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast, mast->l->node = ancestor; mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); mas->offset = mast->bn->b_end - 1; - return true; } /* @@ -3406,7 +3371,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) /* Try to push left. */ if (mas_push_data(mas, height, &mast, true)) break; - /* Try to push right. 
*/ if (mas_push_data(mas, height, &mast, false)) break; @@ -3495,6 +3459,7 @@ static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas, mas_replace_node(wr_mas->mas, old_enode); reuse_node: mas_update_gap(wr_mas->mas); + wr_mas->mas->end = b_end; return 1; } @@ -3521,6 +3486,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry) slots = ma_slots(node, type); node->parent = ma_parent_ptr(mas_tree_parent(mas)); mas->node = mt_mk_node(node, type); + mas->status = ma_active; if (mas->index) { if (contents) { @@ -3553,7 +3519,7 @@ static inline void mas_store_root(struct ma_state *mas, void *entry) mas_root_expand(mas, entry); else { rcu_assign_pointer(mas->tree->ma_root, entry); - mas->node = MAS_START; + mas->status = ma_start; } } @@ -3730,23 +3696,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas) enum maple_type type; void __rcu **slots; unsigned char end; - unsigned long max; next = mas->node; - max = ULONG_MAX; do { - offset = 0; node = mte_to_node(next); type = mte_node_type(next); pivots = ma_pivots(node, type); - end = ma_data_end(node, type, pivots, max); - if (unlikely(ma_dead_node(node))) - goto dead_node; + end = mt_pivots[type]; + offset = 0; do { - if (pivots[offset] >= mas->index) { - max = pivots[offset]; + if (pivots[offset] >= mas->index) break; - } } while (++offset < end); slots = ma_slots(node, type); @@ -3785,7 +3745,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry) mas->depth = 0; mas_set_height(mas); rcu_assign_pointer(mas->tree->ma_root, entry); - mas->node = MAS_START; + mas->status = ma_start; goto done; } @@ -3798,6 +3758,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry) slots = ma_slots(node, type); node->parent = ma_parent_ptr(mas_tree_parent(mas)); mas->node = mt_mk_node(node, type); + mas->status = ma_active; rcu_assign_pointer(slots[0], entry); pivots[0] = mas->last; mas->depth = 1; @@ -3891,10 +3852,10 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) memset(&b_node, 0, sizeof(struct maple_big_node)); /* Copy l_mas and store the value in b_node. */ - mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); + mas_store_b_node(&l_wr_mas, &b_node, l_mas.end); /* Copy r_mas into b_node. */ - if (r_mas.offset <= r_wr_mas.node_end) - mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, + if (r_mas.offset <= r_mas.end) + mas_mab_cp(&r_mas, r_mas.offset, r_mas.end, &b_node, b_node.b_end + 1); else b_node.b_end++; @@ -3936,7 +3897,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, if (mas->last == wr_mas->end_piv) offset_end++; /* don't copy this offset */ else if (unlikely(wr_mas->r_max == ULONG_MAX)) - mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); + mas_bulk_rebalance(mas, mas->end, wr_mas->type); /* set up node. */ if (in_rcu) { @@ -3972,12 +3933,12 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, * this range wrote to the end of the node or it overwrote the rest of * the data */ - if (offset_end > wr_mas->node_end) + if (offset_end > mas->end) goto done; dst_offset = mas->offset + 1; /* Copy to the end of node if necessary. 
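/*
 * Editor's sketch (not from the patch): the simple-API stores that reach
 * mas_store_root()/mas_root_expand() above.  A plain pointer written to
 * the range [0, 0] of an empty (non-allocation) tree can be kept directly
 * in ma_root, the "single entry tree" case; any wider or non-zero range
 * forces allocation of a real node.  "my_tree" and "item" are assumed
 * caller context, error handling omitted.
 */
mtree_store_range(&my_tree, 0, 0, item, GFP_KERNEL);	/* root pointer */
mtree_store_range(&my_tree, 10, 19, item, GFP_KERNEL);	/* expands to a node */
/* mtree_load(&my_tree, 15) now returns item */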
*/ - copy_size = wr_mas->node_end - offset_end + 1; + copy_size = mas->end - offset_end + 1; memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, sizeof(void *) * copy_size); memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, @@ -3987,7 +3948,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, dst_pivots[new_end] = mas->max; done: - mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); + mas_leaf_set_meta(newnode, maple_leaf_64, new_end); if (in_rcu) { struct maple_enode *old_enode = mas->node; @@ -3998,6 +3959,7 @@ done: } trace_ma_write(__func__, mas, 0, wr_mas->entry); mas_update_gap(mas); + mas->end = new_end; return true; } @@ -4063,10 +4025,10 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) } else { /* Check next slot(s) if we are overwriting the end */ if ((mas->last == wr_mas->end_piv) && - (wr_mas->node_end != wr_mas->offset_end) && + (mas->end != wr_mas->offset_end) && !wr_mas->slots[wr_mas->offset_end + 1]) { wr_mas->offset_end++; - if (wr_mas->offset_end == wr_mas->node_end) + if (wr_mas->offset_end == mas->end) mas->last = mas->max; else mas->last = wr_mas->pivots[wr_mas->offset_end]; @@ -4091,11 +4053,11 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) { - while ((wr_mas->offset_end < wr_mas->node_end) && + while ((wr_mas->offset_end < wr_mas->mas->end) && (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) wr_mas->offset_end++; - if (wr_mas->offset_end < wr_mas->node_end) + if (wr_mas->offset_end < wr_mas->mas->end) wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; else wr_mas->end_piv = wr_mas->mas->max; @@ -4107,7 +4069,7 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; - unsigned char new_end = wr_mas->node_end + 2; + unsigned char new_end = mas->end + 2; new_end -= wr_mas->offset_end - mas->offset; if (wr_mas->r_min == mas->index) @@ -4141,10 +4103,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas, if (mt_in_rcu(mas->tree)) return false; - if (mas->offset != wr_mas->node_end) - return false; - - end = wr_mas->node_end; + end = mas->end; if (mas->offset != end) return false; @@ -4178,6 +4137,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas, if (!wr_mas->content || !wr_mas->entry) mas_update_gap(mas); + mas->end = new_end; trace_ma_write(__func__, mas, new_end, wr_mas->entry); return true; } @@ -4195,7 +4155,7 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas) trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); memset(&b_node, 0, sizeof(struct maple_big_node)); mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); - mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end); + mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end); } static inline void mas_wr_modify(struct ma_wr_state *wr_mas) @@ -4223,7 +4183,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas) if (mas_wr_append(wr_mas, new_end)) return; - if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas)) + if (new_end == mas->end && mas_wr_slot_store(wr_mas)) return; if (mas_wr_node_store(wr_mas, new_end)) @@ -4328,7 +4288,57 @@ exists: } -static inline void mas_rewalk(struct ma_state *mas, unsigned long index) +/** + * mas_alloc_cyclic() - Internal call to find somewhere to store an entry + * @mas: The maple state. + * @startp: Pointer to ID. 
+ * @range_lo: Lower bound of range to search. + * @range_hi: Upper bound of range to search. + * @entry: The entry to store. + * @next: Pointer to next ID to allocate. + * @gfp: The GFP_FLAGS to use for allocations. + * + * Return: 0 if the allocation succeeded without wrapping, 1 if the + * allocation succeeded after wrapping, or -EBUSY if there are no + * free entries. + */ +int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp, + void *entry, unsigned long range_lo, unsigned long range_hi, + unsigned long *next, gfp_t gfp) +{ + unsigned long min = range_lo; + int ret = 0; + + range_lo = max(min, *next); + ret = mas_empty_area(mas, range_lo, range_hi, 1); + if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) { + mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED; + ret = 1; + } + if (ret < 0 && range_lo > min) { + ret = mas_empty_area(mas, min, range_hi, 1); + if (ret == 0) + ret = 1; + } + if (ret < 0) + return ret; + + do { + mas_insert(mas, entry); + } while (mas_nomem(mas, gfp)); + if (mas_is_err(mas)) + return xa_err(mas->node); + + *startp = mas->index; + *next = *startp + 1; + if (*next == 0) + mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED; + + return ret; +} +EXPORT_SYMBOL(mas_alloc_cyclic); + +static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index) { retry: mas_set(mas, index); @@ -4337,7 +4347,7 @@ retry: goto retry; } -static inline bool mas_rewalk_if_dead(struct ma_state *mas, +static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas, struct maple_node *node, const unsigned long index) { if (unlikely(ma_dead_node(node))) { @@ -4349,14 +4359,16 @@ static inline bool mas_rewalk_if_dead(struct ma_state *mas, /* * mas_prev_node() - Find the prev non-null entry at the same level in the - * tree. The prev value will be mas->node[mas->offset] or MAS_NONE. + * tree. The prev value will be mas->node[mas->offset] or the status will be + * ma_none. * @mas: The maple state * @min: The lower limit to search * - * The prev node value will be mas->node[mas->offset] or MAS_NONE. + * The prev node value will be mas->node[mas->offset] or the status will be + * ma_none. * Return: 1 if the node is dead, 0 otherwise. 
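/*
 * Editor's sketch (not part of the patch): cyclic ID allocation with the
 * helper added above, in the style of xa_alloc_cyclic().  The tree must
 * be created with MT_FLAGS_ALLOC_RANGE; "id_tree", "next_id" and
 * "struct my_obj" are hypothetical caller state.
 */
static struct maple_tree id_tree = MTREE_INIT(id_tree, MT_FLAGS_ALLOC_RANGE);
static unsigned long next_id = 1;

static int assign_id(struct my_obj *obj)
{
	unsigned long id;
	int ret;

	ret = mtree_alloc_cyclic(&id_tree, &id, obj, 1, 0xffff, &next_id,
				 GFP_KERNEL);
	if (ret < 0)
		return ret;	/* -EBUSY, -ENOMEM or -EINVAL */

	obj->id = id;		/* ret == 1 means the search wrapped */
	return 0;
}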
*/ -static inline int mas_prev_node(struct ma_state *mas, unsigned long min) +static int mas_prev_node(struct ma_state *mas, unsigned long min) { enum maple_type mt; int offset, level; @@ -4416,13 +4428,14 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min) if (unlikely(mte_dead_node(mas->node))) return 1; + mas->end = mas->offset; return 0; no_entry: if (unlikely(ma_dead_node(node))) return 1; - mas->node = MAS_NONE; + mas->status = ma_underflow; return 0; } @@ -4436,8 +4449,7 @@ no_entry: * * Return: The entry in the previous slot which is possibly NULL */ -static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty, - bool set_underflow) +static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty) { void *entry; void __rcu **slots; @@ -4470,13 +4482,16 @@ again: mas->last = mas->index - 1; mas->index = mas_safe_min(mas, pivots, mas->offset); } else { + if (mas->index <= min) + goto underflow; + if (mas_prev_node(mas, min)) { mas_rewalk(mas, save_point); goto retry; } - if (mas_is_none(mas)) - goto underflow; + if (WARN_ON_ONCE(mas_is_underflow(mas))) + return NULL; mas->last = mas->max; node = mas_mn(mas); @@ -4490,12 +4505,15 @@ again: if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; + if (likely(entry)) return entry; if (!empty) { - if (mas->index <= min) - goto underflow; + if (mas->index <= min) { + mas->status = ma_underflow; + return NULL; + } goto again; } @@ -4503,8 +4521,7 @@ again: return entry; underflow: - if (set_underflow) - mas->node = MAS_UNDERFLOW; + mas->status = ma_underflow; return NULL; } @@ -4513,28 +4530,30 @@ underflow: * @mas: The maple state * @max: The maximum pivot value to check. * - * The next value will be mas->node[mas->offset] or MAS_NONE. + * The next value will be mas->node[mas->offset] or the status will have + * overflowed. * Return: 1 on dead node, 0 otherwise. */ -static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, - unsigned long max) +static int mas_next_node(struct ma_state *mas, struct maple_node *node, + unsigned long max) { unsigned long min; unsigned long *pivots; struct maple_enode *enode; + struct maple_node *tmp; int level = 0; unsigned char node_end; enum maple_type mt; void __rcu **slots; if (mas->max >= max) - goto no_entry; + goto overflow; min = mas->max + 1; level = 0; do { if (ma_is_root(node)) - goto no_entry; + goto overflow; /* Walk up. 
*/ if (unlikely(mas_ascend(mas))) @@ -4574,6 +4593,10 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, pivots = ma_pivots(node, mt); mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt); + tmp = mte_to_node(enode); + mt = mte_node_type(enode); + pivots = ma_pivots(tmp, mt); + mas->end = ma_data_end(tmp, mt, pivots, mas->max); if (unlikely(ma_dead_node(node))) return 1; @@ -4581,11 +4604,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, mas->min = min; return 0; -no_entry: +overflow: if (unlikely(ma_dead_node(node))) return 1; - mas->node = MAS_NONE; + mas->status = ma_overflow; return 0; } @@ -4600,15 +4623,13 @@ no_entry: * * Return: The entry in the next slot which is possibly NULL */ -static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty, - bool set_overflow) +static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) { void __rcu **slots; unsigned long *pivots; unsigned long pivot; enum maple_type type; struct maple_node *node; - unsigned char data_end; unsigned long save_point = mas->last; void *entry; @@ -4616,42 +4637,45 @@ retry: node = mas_mn(mas); type = mte_node_type(mas->node); pivots = ma_pivots(node, type); - data_end = ma_data_end(node, type, pivots, mas->max); if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; if (mas->max >= max) { - if (likely(mas->offset < data_end)) + if (likely(mas->offset < mas->end)) pivot = pivots[mas->offset]; else - goto overflow; + pivot = mas->max; if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; - if (pivot >= max) - goto overflow; + if (pivot >= max) { /* Was at the limit, next will extend beyond */ + mas->status = ma_overflow; + return NULL; + } } - if (likely(mas->offset < data_end)) { + if (likely(mas->offset < mas->end)) { mas->index = pivots[mas->offset] + 1; again: mas->offset++; - if (likely(mas->offset < data_end)) + if (likely(mas->offset < mas->end)) mas->last = pivots[mas->offset]; else mas->last = mas->max; } else { + if (mas->last >= max) { + mas->status = ma_overflow; + return NULL; + } + if (mas_next_node(mas, node, max)) { mas_rewalk(mas, save_point); goto retry; } - if (WARN_ON_ONCE(mas_is_none(mas))) { - mas->node = MAS_OVERFLOW; + if (WARN_ON_ONCE(mas_is_overflow(mas))) return NULL; - goto overflow; - } mas->offset = 0; mas->index = mas->min; @@ -4669,21 +4693,18 @@ again: if (entry) return entry; + if (!empty) { - if (mas->last >= max) - goto overflow; + if (mas->last >= max) { + mas->status = ma_overflow; + return NULL; + } mas->index = mas->last + 1; - /* Node cannot end on NULL, so it's safe to short-cut here */ goto again; } return entry; - -overflow: - if (set_overflow) - mas->node = MAS_OVERFLOW; - return NULL; } /* @@ -4702,11 +4723,11 @@ overflow: static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) { if (mas->last >= limit) { - mas->node = MAS_OVERFLOW; + mas->status = ma_overflow; return NULL; } - return mas_next_slot(mas, limit, false, true); + return mas_next_slot(mas, limit, false); } /* @@ -4874,7 +4895,7 @@ done: * @mas: The maple state. * * mas->index and mas->last will be set to the range if there is a value. If - * mas->node is MAS_NONE, reset to MAS_START. + * mas->status is ma_none, reset to ma_start * * Return: the entry at the location or %NULL. 
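/*
 * Editor's sketch (not from the patch): finding a free range with
 * mas_empty_area() and filling it.  On success, mas.index/mas.last
 * describe a gap of "size" slots inside [min, max]; the tree is assumed
 * to use MT_FLAGS_ALLOC_RANGE, and "my_tree"/"item" are hypothetical.
 */
MA_STATE(mas, &my_tree, 0, 0);
int ret;

mtree_lock(&my_tree);
ret = mas_empty_area(&mas, 0, ULONG_MAX, 16);	/* want 16 free slots */
if (!ret)
	ret = mas_store_gfp(&mas, item, GFP_KERNEL);	/* fill the found range */
mtree_unlock(&my_tree);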
*/ @@ -4883,7 +4904,7 @@ void *mas_walk(struct ma_state *mas) void *entry; if (!mas_is_active(mas) || !mas_is_start(mas)) - mas->node = MAS_START; + mas->status = ma_start; retry: entry = mas_state_walk(mas); if (mas_is_start(mas)) { @@ -4899,7 +4920,7 @@ retry: mas->index = 1; mas->last = ULONG_MAX; - mas->node = MAS_NONE; + mas->status = ma_none; return NULL; } @@ -5026,6 +5047,7 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned char offset; unsigned long *pivots; enum maple_type mt; + struct maple_node *node; if (min > max) return -EINVAL; @@ -5056,12 +5078,14 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, if (unlikely(offset == MAPLE_NODE_SLOTS)) return -EBUSY; + node = mas_mn(mas); mt = mte_node_type(mas->node); - pivots = ma_pivots(mas_mn(mas), mt); + pivots = ma_pivots(node, mt); min = mas_safe_min(mas, pivots, offset); if (mas->index < min) mas->index = min; mas->last = mas->index + size - 1; + mas->end = ma_data_end(node, mt, pivots, mas->max); return 0; } EXPORT_SYMBOL_GPL(mas_empty_area); @@ -5085,18 +5109,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, if (size == 0 || max - min < size - 1) return -EINVAL; - if (mas_is_start(mas)) { + if (mas_is_start(mas)) mas_start(mas); - mas->offset = mas_data_end(mas); - } else if (mas->offset >= 2) { - mas->offset -= 2; - } else if (!mas_rewind_node(mas)) { + else if ((mas->offset < 2) && (!mas_rewind_node(mas))) return -EBUSY; - } - /* Empty set. */ - if (mas_is_none(mas) || mas_is_ptr(mas)) + if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) return mas_sparse_area(mas, min, max, size, false); + else if (mas->offset >= 2) + mas->offset -= 2; + else + mas->offset = mas_data_end(mas); + /* The start of the window can only be within these values. */ mas->index = min; @@ -5122,6 +5146,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, mas->last = max; mas->index = mas->last - size + 1; + mas->end = mas_data_end(mas); return 0; } EXPORT_SYMBOL_GPL(mas_empty_area_rev); @@ -5501,13 +5526,24 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) mas_wr_end_piv(&wr_mas); node_size = mas_wr_new_end(&wr_mas); + + /* Slot store, does not require additional nodes */ + if (node_size == mas->end) { + /* reuse node */ + if (!mt_in_rcu(mas->tree)) + return 0; + /* shifting boundary */ + if (wr_mas.offset_end - mas->offset == 1) + return 0; + } + if (node_size >= mt_slots[wr_mas.type]) { /* Split, worst case for now. 
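/*
 * Editor's sketch (not from the patch): the preallocation pattern that the
 * new node_size == mas->end fast path above short-cuts.  Nodes are
 * reserved up front so the store itself has no failure point; the slot
 * store case now reserves nothing at all.  Write lock held by the caller;
 * "my_tree", "first", "last" and "item" are assumed context.
 */
MA_STATE(mas, &my_tree, first, last);

if (mas_preallocate(&mas, item, GFP_KERNEL))
	return -ENOMEM;

/* ... past the point of no return ... */
mas_store_prealloc(&mas, item);	/* also releases unused preallocations */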
*/ request = 1 + mas_mt_height(mas) * 2; goto ask_now; } - /* New root needs a singe node */ + /* New root needs a single node */ if (unlikely(mte_is_root(mas->node))) goto ask_now; @@ -5555,7 +5591,7 @@ void mas_destroy(struct ma_state *mas) mas_start(mas); mtree_range_walk(mas); - end = mas_data_end(mas) + 1; + end = mas->end + 1; if (end < mt_min_slot_count(mas->node) - 1) mas_destroy_rebalance(mas, end); @@ -5573,7 +5609,7 @@ void mas_destroy(struct ma_state *mas) mt_free_bulk(count, (void __rcu **)&node->slot[1]); total -= count; } - kmem_cache_free(maple_node_cache, node); + mt_free_one(ma_mnode_ptr(node)); total--; } @@ -5643,33 +5679,46 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) } EXPORT_SYMBOL_GPL(mas_expected_entries); -static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, +static bool mas_next_setup(struct ma_state *mas, unsigned long max, void **entry) { bool was_none = mas_is_none(mas); if (unlikely(mas->last >= max)) { - mas->node = MAS_OVERFLOW; + mas->status = ma_overflow; return true; } - if (mas_is_active(mas)) + switch (mas->status) { + case ma_active: return false; - - if (mas_is_none(mas) || mas_is_paused(mas)) { - mas->node = MAS_START; - } else if (mas_is_overflow(mas)) { + case ma_none: + fallthrough; + case ma_pause: + mas->status = ma_start; + fallthrough; + case ma_start: + mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ + break; + case ma_overflow: /* Overflowed before, but the max changed */ - mas->node = MAS_START; - } else if (mas_is_underflow(mas)) { - mas->node = MAS_START; + mas->status = ma_active; + break; + case ma_underflow: + /* The user expects the mas to be one before where it is */ + mas->status = ma_active; *entry = mas_walk(mas); if (*entry) return true; + break; + case ma_root: + break; + case ma_error: + return true; } - if (mas_is_start(mas)) - *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ + if (likely(mas_is_active(mas))) /* Fast path */ + return false; if (mas_is_ptr(mas)) { *entry = NULL; @@ -5679,7 +5728,7 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, } mas->index = 1; mas->last = ULONG_MAX; - mas->node = MAS_NONE; + mas->status = ma_none; return true; } @@ -5708,7 +5757,7 @@ void *mas_next(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, false, true); + return mas_next_slot(mas, max, false); } EXPORT_SYMBOL_GPL(mas_next); @@ -5731,7 +5780,7 @@ void *mas_next_range(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, true, true); + return mas_next_slot(mas, max, true); } EXPORT_SYMBOL_GPL(mas_next_range); @@ -5759,37 +5808,48 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) } EXPORT_SYMBOL_GPL(mt_next); -static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, - void **entry) +static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry) { if (unlikely(mas->index <= min)) { - mas->node = MAS_UNDERFLOW; + mas->status = ma_underflow; return true; } - if (mas_is_active(mas)) + switch (mas->status) { + case ma_active: return false; - - if (mas_is_overflow(mas)) { - mas->node = MAS_START; + case ma_start: + break; + case ma_none: + fallthrough; + case ma_pause: + mas->status = ma_start; + break; + case ma_underflow: + /* underflowed before but the min changed */ + mas->status 
= ma_active; + break; + case ma_overflow: + /* User expects mas to be one after where it is */ + mas->status = ma_active; *entry = mas_walk(mas); if (*entry) return true; - } - - if (mas_is_none(mas) || mas_is_paused(mas)) { - mas->node = MAS_START; - } else if (mas_is_underflow(mas)) { - /* underflowed before but the min changed */ - mas->node = MAS_START; + break; + case ma_root: + break; + case ma_error: + return true; } if (mas_is_start(mas)) mas_walk(mas); if (unlikely(mas_is_ptr(mas))) { - if (!mas->index) - goto none; + if (!mas->index) { + mas->status = ma_none; + return true; + } mas->index = mas->last = 0; *entry = mas_root(mas); return true; @@ -5799,7 +5859,7 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, if (mas->index) { /* Walked to out-of-range pointer? */ mas->index = mas->last = 0; - mas->node = MAS_ROOT; + mas->status = ma_root; *entry = mas_root(mas); return true; } @@ -5807,10 +5867,6 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, } return false; - -none: - mas->node = MAS_NONE; - return true; } /** @@ -5819,7 +5875,7 @@ none: * @min: The minimum value to check. * * Must hold rcu_read_lock or the write lock. - * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not + * Will reset mas to ma_start if the status is ma_none. Will stop on not * searchable nodes. * * Return: the previous value or %NULL. @@ -5831,7 +5887,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min) if (mas_prev_setup(mas, min, &entry)) return entry; - return mas_prev_slot(mas, min, false, true); + return mas_prev_slot(mas, min, false); } EXPORT_SYMBOL_GPL(mas_prev); @@ -5842,7 +5898,7 @@ EXPORT_SYMBOL_GPL(mas_prev); * * Sets @mas->index and @mas->last to the range. * Must hold rcu_read_lock or the write lock. - * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not + * Will reset mas to ma_start if the node is ma_none. Will stop on not * searchable nodes. * * Return: the previous value or %NULL. @@ -5854,7 +5910,7 @@ void *mas_prev_range(struct ma_state *mas, unsigned long min) if (mas_prev_setup(mas, min, &entry)) return entry; - return mas_prev_slot(mas, min, true, true); + return mas_prev_slot(mas, min, true); } EXPORT_SYMBOL_GPL(mas_prev_range); @@ -5897,7 +5953,8 @@ EXPORT_SYMBOL_GPL(mt_prev); */ void mas_pause(struct ma_state *mas) { - mas->node = MAS_PAUSE; + mas->status = ma_pause; + mas->node = NULL; } EXPORT_SYMBOL_GPL(mas_pause); @@ -5909,35 +5966,54 @@ EXPORT_SYMBOL_GPL(mas_pause); * * Returns: True if entry is the answer, false otherwise. 
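/*
 * Editor's sketch (not from the patch): the ma_pause handling above exists
 * for iterations that drop the lock part way through.  mas_pause() saves
 * only the position; the next mas_find()/mas_next() call re-walks from the
 * saved range instead of trusting a possibly stale node pointer.
 */
MA_STATE(mas, &my_tree, 0, 0);
void *entry;

rcu_read_lock();
mas_for_each(&mas, entry, ULONG_MAX) {
	if (need_resched()) {
		mas_pause(&mas);
		rcu_read_unlock();
		cond_resched();
		rcu_read_lock();
	}
	/* process entry */
}
rcu_read_unlock();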
*/ -static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, - void **entry) +static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry) { - if (mas_is_active(mas)) { + switch (mas->status) { + case ma_active: if (mas->last < max) return false; - return true; - } - - if (mas_is_paused(mas)) { + case ma_start: + break; + case ma_pause: if (unlikely(mas->last >= max)) return true; mas->index = ++mas->last; - mas->node = MAS_START; - } else if (mas_is_none(mas)) { + mas->status = ma_start; + break; + case ma_none: if (unlikely(mas->last >= max)) return true; mas->index = mas->last; - mas->node = MAS_START; - } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) { - if (mas->index > max) { - mas->node = MAS_OVERFLOW; + mas->status = ma_start; + break; + case ma_underflow: + /* mas is pointing at entry before unable to go lower */ + if (unlikely(mas->index >= max)) { + mas->status = ma_overflow; return true; } - mas->node = MAS_START; + mas->status = ma_active; + *entry = mas_walk(mas); + if (*entry) + return true; + break; + case ma_overflow: + if (unlikely(mas->last >= max)) + return true; + + mas->status = ma_active; + *entry = mas_walk(mas); + if (*entry) + return true; + break; + case ma_root: + break; + case ma_error: + return true; } if (mas_is_start(mas)) { @@ -5951,12 +6027,11 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, } - if (unlikely(!mas_searchable(mas))) { - if (unlikely(mas_is_ptr(mas))) - goto ptr_out_of_range; + if (unlikely(mas_is_ptr(mas))) + goto ptr_out_of_range; + if (unlikely(mas_is_none(mas))) return true; - } if (mas->index == max) return true; @@ -5964,7 +6039,7 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, return false; ptr_out_of_range: - mas->node = MAS_NONE; + mas->status = ma_none; mas->index = 1; mas->last = ULONG_MAX; return true; @@ -5978,7 +6053,7 @@ ptr_out_of_range: * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_overflow. * * Return: The entry or %NULL. */ @@ -5990,7 +6065,10 @@ void *mas_find(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, false, false); + entry = mas_next_slot(mas, max, false); + /* Ignore overflow */ + mas->status = ma_active; + return entry; } EXPORT_SYMBOL_GPL(mas_find); @@ -6002,7 +6080,7 @@ EXPORT_SYMBOL_GPL(mas_find); * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_overflow. * * Return: The entry or %NULL. */ @@ -6014,7 +6092,7 @@ void *mas_find_range(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, true, false); + return mas_next_slot(mas, max, true); } EXPORT_SYMBOL_GPL(mas_find_range); @@ -6026,36 +6104,48 @@ EXPORT_SYMBOL_GPL(mas_find_range); * * Returns: True if entry is the answer, false otherwise. 
*/ -static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, +static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, void **entry) { - if (mas_is_active(mas)) { - if (mas->index > min) - return false; - - return true; - } - if (mas_is_paused(mas)) { + switch (mas->status) { + case ma_active: + goto active; + case ma_start: + break; + case ma_pause: if (unlikely(mas->index <= min)) { - mas->node = MAS_NONE; + mas->status = ma_underflow; return true; } - mas->node = MAS_START; mas->last = --mas->index; - } else if (mas_is_none(mas)) { + mas->status = ma_start; + break; + case ma_none: if (mas->index <= min) goto none; mas->last = mas->index; - mas->node = MAS_START; - } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) { - if (mas->last <= min) { - mas->node = MAS_UNDERFLOW; + mas->status = ma_start; + break; + case ma_overflow: /* user expects the mas to be one after where it is */ + if (unlikely(mas->index <= min)) { + mas->status = ma_underflow; return true; } - mas->node = MAS_START; + mas->status = ma_active; + break; + case ma_underflow: /* user expects the mas to be one before where it is */ + if (unlikely(mas->index <= min)) + return true; + + mas->status = ma_active; + break; + case ma_root: + break; + case ma_error: + return true; } if (mas_is_start(mas)) { @@ -6068,29 +6158,28 @@ static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, return true; } - if (unlikely(!mas_searchable(mas))) { - if (mas_is_ptr(mas)) - goto none; + if (unlikely(mas_is_ptr(mas))) + goto none; - if (mas_is_none(mas)) { - /* - * Walked to the location, and there was nothing so the - * previous location is 0. - */ - mas->last = mas->index = 0; - mas->node = MAS_ROOT; - *entry = mas_root(mas); - return true; - } + if (unlikely(mas_is_none(mas))) { + /* + * Walked to the location, and there was nothing so the previous + * location is 0. + */ + mas->last = mas->index = 0; + mas->status = ma_root; + *entry = mas_root(mas); + return true; } +active: if (mas->index < min) return true; return false; none: - mas->node = MAS_NONE; + mas->status = ma_none; return true; } @@ -6103,7 +6192,7 @@ none: * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_underflow. * * Return: The entry or %NULL. */ @@ -6115,7 +6204,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min) return entry; /* Retries on dead nodes handled by mas_prev_slot */ - return mas_prev_slot(mas, min, false, false); + return mas_prev_slot(mas, min, false); } EXPORT_SYMBOL_GPL(mas_find_rev); @@ -6129,7 +6218,7 @@ EXPORT_SYMBOL_GPL(mas_find_rev); * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_underflow. * * Return: The entry or %NULL. */ @@ -6141,7 +6230,7 @@ void *mas_find_range_rev(struct ma_state *mas, unsigned long min) return entry; /* Retries on dead nodes handled by mas_prev_slot */ - return mas_prev_slot(mas, min, true, false); + return mas_prev_slot(mas, min, true); } EXPORT_SYMBOL_GPL(mas_find_range_rev); @@ -6161,8 +6250,8 @@ void *mas_erase(struct ma_state *mas) void *entry; MA_WR_STATE(wr_mas, mas, NULL); - if (mas_is_none(mas) || mas_is_paused(mas)) - mas->node = MAS_START; + if (!mas_is_active(mas) || !mas_is_start(mas)) + mas->status = ma_start; /* Retry unnecessary when holding the write lock. 
*/ entry = mas_state_walk(mas); @@ -6207,7 +6296,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp) if (!mas_allocated(mas)) return false; - mas->node = MAS_START; + mas->status = ma_start; return true; } @@ -6402,6 +6491,49 @@ unlock: } EXPORT_SYMBOL(mtree_alloc_range); +/** + * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree. + * @mt: The maple tree. + * @startp: Pointer to ID. + * @range_lo: Lower bound of range to search. + * @range_hi: Upper bound of range to search. + * @entry: The entry to store. + * @next: Pointer to next ID to allocate. + * @gfp: The GFP_FLAGS to use for allocations. + * + * Finds an empty entry in @mt after @next, stores the new index into + * the @id pointer, stores the entry at that index, then updates @next. + * + * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag. + * + * Context: Any context. Takes and releases the mt.lock. May sleep if + * the @gfp flags permit. + * + * Return: 0 if the allocation succeeded without wrapping, 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no + * free entries. + */ +int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp, + void *entry, unsigned long range_lo, unsigned long range_hi, + unsigned long *next, gfp_t gfp) +{ + int ret; + + MA_STATE(mas, mt, 0, 0); + + if (!mt_is_alloc(mt)) + return -EINVAL; + if (WARN_ON_ONCE(mt_is_reserved(entry))) + return -EINVAL; + mtree_lock(mt); + ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi, + next, gfp); + mtree_unlock(mt); + return ret; +} +EXPORT_SYMBOL(mtree_alloc_cyclic); + int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, void *entry, unsigned long size, unsigned long min, unsigned long max, gfp_t gfp) @@ -6465,6 +6597,278 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index) } EXPORT_SYMBOL(mtree_erase); +/* + * mas_dup_free() - Free an incomplete duplication of a tree. + * @mas: The maple state of a incomplete tree. + * + * The parameter @mas->node passed in indicates that the allocation failed on + * this node. This function frees all nodes starting from @mas->node in the + * reverse order of mas_dup_build(). There is no need to hold the source tree + * lock at this time. + */ +static void mas_dup_free(struct ma_state *mas) +{ + struct maple_node *node; + enum maple_type type; + void __rcu **slots; + unsigned char count, i; + + /* Maybe the first node allocation failed. */ + if (mas_is_none(mas)) + return; + + while (!mte_is_root(mas->node)) { + mas_ascend(mas); + if (mas->offset) { + mas->offset--; + do { + mas_descend(mas); + mas->offset = mas_data_end(mas); + } while (!mte_is_leaf(mas->node)); + + mas_ascend(mas); + } + + node = mte_to_node(mas->node); + type = mte_node_type(mas->node); + slots = ma_slots(node, type); + count = mas_data_end(mas) + 1; + for (i = 0; i < count; i++) + ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK; + mt_free_bulk(count, slots); + } + + node = mte_to_node(mas->node); + mt_free_one(node); +} + +/* + * mas_copy_node() - Copy a maple node and replace the parent. + * @mas: The maple state of source tree. + * @new_mas: The maple state of new tree. + * @parent: The parent of the new node. + * + * Copy @mas->node to @new_mas->node, set @parent to be the parent of + * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM. 
+ */ +static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas, + struct maple_pnode *parent) +{ + struct maple_node *node = mte_to_node(mas->node); + struct maple_node *new_node = mte_to_node(new_mas->node); + unsigned long val; + + /* Copy the node completely. */ + memcpy(new_node, node, sizeof(struct maple_node)); + /* Update the parent node pointer. */ + val = (unsigned long)node->parent & MAPLE_NODE_MASK; + new_node->parent = ma_parent_ptr(val | (unsigned long)parent); +} + +/* + * mas_dup_alloc() - Allocate child nodes for a maple node. + * @mas: The maple state of source tree. + * @new_mas: The maple state of new tree. + * @gfp: The GFP_FLAGS to use for allocations. + * + * This function allocates child nodes for @new_mas->node during the duplication + * process. If memory allocation fails, @mas is set to -ENOMEM. + */ +static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas, + gfp_t gfp) +{ + struct maple_node *node = mte_to_node(mas->node); + struct maple_node *new_node = mte_to_node(new_mas->node); + enum maple_type type; + unsigned char request, count, i; + void __rcu **slots; + void __rcu **new_slots; + unsigned long val; + + /* Allocate memory for child nodes. */ + type = mte_node_type(mas->node); + new_slots = ma_slots(new_node, type); + request = mas_data_end(mas) + 1; + count = mt_alloc_bulk(gfp, request, (void **)new_slots); + if (unlikely(count < request)) { + memset(new_slots, 0, request * sizeof(void *)); + mas_set_err(mas, -ENOMEM); + return; + } + + /* Restore node type information in slots. */ + slots = ma_slots(node, type); + for (i = 0; i < count; i++) { + val = (unsigned long)mt_slot_locked(mas->tree, slots, i); + val &= MAPLE_NODE_MASK; + ((unsigned long *)new_slots)[i] |= val; + } +} + +/* + * mas_dup_build() - Build a new maple tree from a source tree + * @mas: The maple state of source tree, need to be in MAS_START state. + * @new_mas: The maple state of new tree, need to be in MAS_START state. + * @gfp: The GFP_FLAGS to use for allocations. + * + * This function builds a new tree in DFS preorder. If the memory allocation + * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the + * last node. mas_dup_free() will free the incomplete duplication of a tree. + * + * Note that the attributes of the two trees need to be exactly the same, and the + * new tree needs to be empty, otherwise -EINVAL will be set in @mas. + */ +static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas, + gfp_t gfp) +{ + struct maple_node *node; + struct maple_pnode *parent = NULL; + struct maple_enode *root; + enum maple_type type; + + if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) || + unlikely(!mtree_empty(new_mas->tree))) { + mas_set_err(mas, -EINVAL); + return; + } + + root = mas_start(mas); + if (mas_is_ptr(mas) || mas_is_none(mas)) + goto set_new_tree; + + node = mt_alloc_one(gfp); + if (!node) { + new_mas->status = ma_none; + mas_set_err(mas, -ENOMEM); + return; + } + + type = mte_node_type(mas->node); + root = mt_mk_node(node, type); + new_mas->node = root; + new_mas->min = 0; + new_mas->max = ULONG_MAX; + root = mte_mk_root(root); + while (1) { + mas_copy_node(mas, new_mas, parent); + if (!mte_is_leaf(mas->node)) { + /* Only allocate child nodes for non-leaf nodes. */ + mas_dup_alloc(mas, new_mas, gfp); + if (unlikely(mas_is_err(mas))) + return; + } else { + /* + * This is the last leaf node and duplication is + * completed. 
+ */ + if (mas->max == ULONG_MAX) + goto done; + + /* This is not the last leaf node and needs to go up. */ + do { + mas_ascend(mas); + mas_ascend(new_mas); + } while (mas->offset == mas_data_end(mas)); + + /* Move to the next subtree. */ + mas->offset++; + new_mas->offset++; + } + + mas_descend(mas); + parent = ma_parent_ptr(mte_to_node(new_mas->node)); + mas_descend(new_mas); + mas->offset = 0; + new_mas->offset = 0; + } +done: + /* Specially handle the parent of the root node. */ + mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas)); +set_new_tree: + /* Make them the same height */ + new_mas->tree->ma_flags = mas->tree->ma_flags; + rcu_assign_pointer(new_mas->tree->ma_root, root); +} + +/** + * __mt_dup(): Duplicate an entire maple tree + * @mt: The source maple tree + * @new: The new maple tree + * @gfp: The GFP_FLAGS to use for allocations + * + * This function duplicates a maple tree in Depth-First Search (DFS) pre-order + * traversal. It uses memcpy() to copy nodes in the source tree and allocate + * new child nodes in non-leaf nodes. The new node is exactly the same as the + * source node except for all the addresses stored in it. It will be faster than + * traversing all elements in the source tree and inserting them one by one into + * the new tree. + * The user needs to ensure that the attributes of the source tree and the new + * tree are the same, and the new tree needs to be an empty tree, otherwise + * -EINVAL will be returned. + * Note that the user needs to manually lock the source tree and the new tree. + * + * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If + * the attributes of the two trees are different or the new tree is not an empty + * tree. + */ +int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) +{ + int ret = 0; + MA_STATE(mas, mt, 0, 0); + MA_STATE(new_mas, new, 0, 0); + + mas_dup_build(&mas, &new_mas, gfp); + if (unlikely(mas_is_err(&mas))) { + ret = xa_err(mas.node); + if (ret == -ENOMEM) + mas_dup_free(&new_mas); + } + + return ret; +} +EXPORT_SYMBOL(__mt_dup); + +/** + * mtree_dup(): Duplicate an entire maple tree + * @mt: The source maple tree + * @new: The new maple tree + * @gfp: The GFP_FLAGS to use for allocations + * + * This function duplicates a maple tree in Depth-First Search (DFS) pre-order + * traversal. It uses memcpy() to copy nodes in the source tree and allocate + * new child nodes in non-leaf nodes. The new node is exactly the same as the + * source node except for all the addresses stored in it. It will be faster than + * traversing all elements in the source tree and inserting them one by one into + * the new tree. + * The user needs to ensure that the attributes of the source tree and the new + * tree are the same, and the new tree needs to be an empty tree, otherwise + * -EINVAL will be returned. + * + * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If + * the attributes of the two trees are different or the new tree is not an empty + * tree. 
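/*
 * Editor's sketch (not part of the patch): duplicating a tree with the new
 * helpers.  The destination must be empty and carry the same flags as the
 * source; mtree_dup() takes both locks itself, while __mt_dup() is for
 * callers that already hold them (e.g. a fork-style path that fixes up
 * entries afterwards).  "old_tree" is a hypothetical populated tree.
 */
struct maple_tree new_tree;
int ret;

mt_init_flags(&new_tree, MT_FLAGS_ALLOC_RANGE);	/* must match old_tree */
ret = mtree_dup(&old_tree, &new_tree, GFP_KERNEL);
if (ret)	/* -ENOMEM or -EINVAL */
	return ret;
/* new_tree now holds the same ranges and entries as old_tree */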
+ */ +int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) +{ + int ret = 0; + MA_STATE(mas, mt, 0, 0); + MA_STATE(new_mas, new, 0, 0); + + mas_lock(&new_mas); + mas_lock_nested(&mas, SINGLE_DEPTH_NESTING); + mas_dup_build(&mas, &new_mas, gfp); + mas_unlock(&mas); + if (unlikely(mas_is_err(&mas))) { + ret = xa_err(mas.node); + if (ret == -ENOMEM) + mas_dup_free(&new_mas); + } + + mas_unlock(&new_mas); + return ret; +} +EXPORT_SYMBOL(mtree_dup); + /** * __mt_destroy() - Walk and free all nodes of a locked maple tree. * @mt: The maple tree @@ -6479,7 +6883,7 @@ void __mt_destroy(struct maple_tree *mt) if (xa_is_node(root)) mte_destroy_walk(root, mt); - mt->ma_flags = 0; + mt->ma_flags = mt_attr(mt); } EXPORT_SYMBOL_GPL(__mt_destroy); @@ -6538,7 +6942,7 @@ retry: if (entry) goto unlock; - while (mas_searchable(&mas) && (mas.last < max)) { + while (mas_is_active(&mas) && (mas.last < max)) { entry = mas_next_entry(&mas, max); if (likely(entry && !xa_is_zero(entry))) break; @@ -6620,26 +7024,6 @@ unsigned int mt_nr_allocated(void) return kmem_cache_nr_allocated(maple_node_cache); } -/* - * mas_dead_node() - Check if the maple state is pointing to a dead node. - * @mas: The maple state - * @index: The index to restore in @mas. - * - * Used in test code. - * Return: 1 if @mas has been reset to MAS_START, 0 otherwise. - */ -static inline int mas_dead_node(struct ma_state *mas, unsigned long index) -{ - if (unlikely(!mas_searchable(mas) || mas_is_start(mas))) - return 0; - - if (likely(!mte_dead_node(mas->node))) - return 0; - - mas_rewalk(mas, index); - return 1; -} - void mt_cache_shrink(void) { } @@ -6678,11 +7062,11 @@ static inline struct maple_enode *mas_get_slot(struct ma_state *mas, static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) { - struct maple_enode *p = MAS_NONE, *mn = mas->node; + struct maple_enode *p, *mn = mas->node; unsigned long p_min, p_max; mas_next_node(mas, mas_mn(mas), max); - if (!mas_is_none(mas)) + if (!mas_is_overflow(mas)) return; if (mte_is_root(mn)) @@ -6695,7 +7079,7 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) p_min = mas->min; p_max = mas->max; mas_prev_node(mas, 0); - } while (!mas_is_none(mas)); + } while (!mas_is_underflow(mas)); mas->node = p; mas->max = p_max; @@ -6718,7 +7102,6 @@ static void mt_dump_range(unsigned long min, unsigned long max, else pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); break; - default: case mt_dump_dec: if (min == max) pr_info("%.*s%lu: ", depth * 2, spaces, min); @@ -6758,7 +7141,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, case mt_dump_hex: pr_cont("%p %lX ", node->slot[i], node->pivot[i]); break; - default: case mt_dump_dec: pr_cont("%p %lu ", node->slot[i], node->pivot[i]); } @@ -6788,7 +7170,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n", node, last, max, i); break; - default: case mt_dump_dec: pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", node, last, max, i); @@ -6813,7 +7194,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, case mt_dump_hex: pr_cont("%lx ", node->gap[i]); break; - default: case mt_dump_dec: pr_cont("%lu ", node->gap[i]); } @@ -6824,7 +7204,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, case mt_dump_hex: pr_cont("%p %lX ", node->slot[i], node->pivot[i]); break; - default: case mt_dump_dec: pr_cont("%p %lu ", node->slot[i], node->pivot[i]); } @@ -6965,7 +7344,8 
@@ static void mas_validate_gaps(struct ma_state *mas) counted: if (mt == maple_arange_64) { - offset = ma_meta_gap(node, mt); + MT_BUG_ON(mas->tree, !gaps); + offset = ma_meta_gap(node); if (offset > i) { pr_err("gap offset %p[%u] is invalid\n", node, offset); MT_BUG_ON(mas->tree, 1); @@ -6977,7 +7357,6 @@ counted: MT_BUG_ON(mas->tree, 1); } - MT_BUG_ON(mas->tree, !gaps); for (i++ ; i < mt_slot_count(mte); i++) { if (gaps[i] != 0) { pr_err("gap %p[%u] beyond node limit != 0\n", @@ -7155,7 +7534,7 @@ static void mt_validate_nulls(struct maple_tree *mt) MA_STATE(mas, mt, 0, 0); mas_start(&mas); - if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) + if (mas_is_none(&mas) || (mas_is_ptr(&mas))) return; while (!mte_is_leaf(mas.node)) @@ -7172,7 +7551,7 @@ static void mt_validate_nulls(struct maple_tree *mt) last = entry; if (offset == mas_data_end(&mas)) { mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); - if (mas_is_none(&mas)) + if (mas_is_overflow(&mas)) return; offset = 0; slots = ma_slots(mte_to_node(mas.node), @@ -7181,7 +7560,7 @@ static void mt_validate_nulls(struct maple_tree *mt) offset++; } - } while (!mas_is_none(&mas)); + } while (!mas_is_overflow(&mas)); } /* @@ -7196,13 +7575,13 @@ void mt_validate(struct maple_tree *mt) MA_STATE(mas, mt, 0, 0); rcu_read_lock(); mas_start(&mas); - if (!mas_searchable(&mas)) + if (!mas_is_active(&mas)) goto done; while (!mte_is_leaf(mas.node)) mas_descend(&mas); - while (!mas_is_none(&mas)) { + while (!mas_is_overflow(&mas)) { MAS_WARN_ON(&mas, mte_dead_node(mas.node)); end = mas_data_end(&mas); if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) && @@ -7227,16 +7606,35 @@ EXPORT_SYMBOL_GPL(mt_validate); void mas_dump(const struct ma_state *mas) { pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node); - if (mas_is_none(mas)) - pr_err("(MAS_NONE) "); - else if (mas_is_ptr(mas)) - pr_err("(MAS_ROOT) "); - else if (mas_is_start(mas)) - pr_err("(MAS_START) "); - else if (mas_is_paused(mas)) - pr_err("(MAS_PAUSED) "); - - pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last); + switch (mas->status) { + case ma_active: + pr_err("(ma_active)"); + break; + case ma_none: + pr_err("(ma_none)"); + break; + case ma_root: + pr_err("(ma_root)"); + break; + case ma_start: + pr_err("(ma_start) "); + break; + case ma_pause: + pr_err("(ma_pause) "); + break; + case ma_overflow: + pr_err("(ma_overflow) "); + break; + case ma_underflow: + pr_err("(ma_underflow) "); + break; + case ma_error: + pr_err("(ma_error) "); + break; + } + + pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end, + mas->index, mas->last); pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n", mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); if (mas->index > mas->last) @@ -7249,7 +7647,7 @@ void mas_wr_dump(const struct ma_wr_state *wr_mas) pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n", wr_mas->node, wr_mas->r_min, wr_mas->r_max); pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n", - wr_mas->type, wr_mas->offset_end, wr_mas->node_end, + wr_mas->type, wr_mas->offset_end, wr_mas->mas->end, wr_mas->end_piv); } EXPORT_SYMBOL_GPL(mas_wr_dump); diff --git a/lib/math/div64.c b/lib/math/div64.c index 55a81782e271..191761b1b623 100644 --- a/lib/math/div64.c +++ b/lib/math/div64.c @@ -22,6 +22,7 @@ #include <linux/export.h> #include <linux/math.h> #include <linux/math64.h> +#include <linux/minmax.h> #include <linux/log2.h> /* Not needed on 64bit architectures */ @@ -191,6 +192,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) /* can a * b overflow ? 
*/ if (ilog2(a) + ilog2(b) > 62) { /* + * Note that the algorithm after the if block below might lose + * some precision and the result is more exact for b > a. So + * exchange a and b if a is bigger than b. + * + * For example with a = 43980465100800, b = 100000000, c = 1000000000 + * the below calculation doesn't modify b at all because div == 0 + * and then shift becomes 45 + 26 - 62 = 9 and so the result + * becomes 4398035251080. However with a and b swapped the exact + * result is calculated (i.e. 4398046510080). + */ + if (a > b) + swap(a, b); + + /* * (b * a) / c is equal to * * (b / c) * a + diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c index d42cebf7407f..d3b64b10da1c 100644 --- a/lib/math/prime_numbers.c +++ b/lib/math/prime_numbers.c @@ -6,8 +6,6 @@ #include <linux/prime_numbers.h> #include <linux/slab.h> -#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long)) - struct primes { struct rcu_head rcu; unsigned long last, sz; diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c index 440aee705ccc..20ea9038c3ff 100644 --- a/lib/memcpy_kunit.c +++ b/lib/memcpy_kunit.c @@ -32,7 +32,7 @@ struct some_bytes { BUILD_BUG_ON(sizeof(instance.data) != 32); \ for (size_t i = 0; i < sizeof(instance.data); i++) { \ KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \ - "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \ + "line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \ __LINE__, #instance, v, i, instance.data[i]); \ } \ } while (0) @@ -41,7 +41,7 @@ struct some_bytes { BUILD_BUG_ON(sizeof(one) != sizeof(two)); \ for (size_t i = 0; i < sizeof(one); i++) { \ KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \ - "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \ + "line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \ __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \ } \ kunit_info(test, "ok: " TEST_OP "() " name "\n"); \ @@ -309,9 +309,6 @@ static void set_random_nonzero(struct kunit *test, u8 *byte) static void init_large(struct kunit *test) { - if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) - kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y"); - /* Get many bit patterns. */ get_random_bytes(large_src, ARRAY_SIZE(large_src)); @@ -496,58 +493,6 @@ static void memmove_overlap_test(struct kunit *test) } } -static void strtomem_test(struct kunit *test) -{ - static const char input[sizeof(unsigned long)] = "hi"; - static const char truncate[] = "this is too long"; - struct { - unsigned long canary1; - unsigned char output[sizeof(unsigned long)] __nonstring; - unsigned long canary2; - } wrap; - - memset(&wrap, 0xFF, sizeof(wrap)); - KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, - "bad initial canary value"); - KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, - "bad initial canary value"); - - /* Check unpadded copy leaves surroundings untouched. */ - strtomem(wrap.output, input); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); - KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); - for (size_t i = 2; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check truncated copy leaves surroundings untouched. 
*/ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem(wrap.output, truncate); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - for (size_t i = 0; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check padded copy leaves only string padded. */ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem_pad(wrap.output, input, 0xAA); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); - KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); - for (size_t i = 2; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check truncated padded copy has no padding. */ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem(wrap.output, truncate); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - for (size_t i = 0; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); -} - static struct kunit_case memcpy_test_cases[] = { KUNIT_CASE(memset_test), KUNIT_CASE(memcpy_test), @@ -555,7 +500,6 @@ static struct kunit_case memcpy_test_cases[] = { KUNIT_CASE_SLOW(memmove_test), KUNIT_CASE_SLOW(memmove_large_test), KUNIT_CASE_SLOW(memmove_overlap_test), - KUNIT_CASE(strtomem_test), {} }; diff --git a/lib/nlattr.c b/lib/nlattr.c index dc15e7888fc1..be9c576b6e2d 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = { [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), + [NLA_BE16] = sizeof(__be16), + [NLA_BE32] = sizeof(__be32), }; static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { @@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), + [NLA_BE16] = sizeof(__be16), + [NLA_BE32] = sizeof(__be32), }; /* @@ -758,7 +762,7 @@ EXPORT_SYMBOL(nla_find); * @dstsize: Size of destination buffer. * * Copies at most dstsize - 1 bytes into the destination buffer. - * Unlike strlcpy the destination buffer is always padded out. + * Unlike strscpy() the destination buffer is always padded out. * * Return: * * srclen - Returns @nla length (not including the trailing %NUL). 
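The lib/math/div64.c hunk above justifies the new swap(a, b) with a worked example. The following is a minimal userspace sketch of the same scaling path, not the kernel implementation: ilog2_u64() and mul_div_sketch() are illustrative stand-ins, and unsigned __int128 is used only to print the exact reference value.

#include <stdint.h>
#include <stdio.h>

static int ilog2_u64(uint64_t v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

static uint64_t mul_div_sketch(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t res = 0;

	/* Same overflow-avoidance shape as the kernel helper, simplified. */
	if (ilog2_u64(a) + ilog2_u64(b) > 62) {
		uint64_t div = b / c, rem = b % c;
		int shift;

		res = div * a;		/* (b / c) * a */
		b = rem;		/* b % c */
		shift = ilog2_u64(a) + ilog2_u64(b) - 62;
		if (shift > 0) {
			/* precision is dropped here when b is the small operand */
			b >>= shift;
			c >>= shift;
			if (!c)
				return res;
		}
	}
	return res + a * b / c;
}

int main(void)
{
	uint64_t a = 43980465100800ULL, b = 100000000ULL, c = 1000000000ULL;

	printf("no swap:   %llu\n", (unsigned long long)mul_div_sketch(a, b, c));
	printf("with swap: %llu\n", (unsigned long long)mul_div_sketch(b, a, c));
	printf("exact:     %llu\n",
	       (unsigned long long)((unsigned __int128)a * b / c));
	return 0;
}

With the operands in the original order, div is 0 and the shift of 9 drops low bits of b, giving 4398035251080; with a and b swapped no shift is needed and the exact 4398046510080 is produced, matching the figures quoted in the comment.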
diff --git a/lib/objpool.c b/lib/objpool.c index ce0087f64400..cfdc02420884 100644 --- a/lib/objpool.c +++ b/lib/objpool.c @@ -201,6 +201,23 @@ static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu) while (head != READ_ONCE(slot->last)) { void *obj; + /* + * data visibility of 'last' and 'head' could be out of + * order since memory updating of 'last' and 'head' are + * performed in push() and pop() independently + * + * before any retrieving attempts, pop() must guarantee + * 'last' is behind 'head', that is to say, there must + * be available objects in slot, which could be ensured + * by condition 'last != head && last - head <= nr_objs' + * that is equivalent to 'last - head - 1 < nr_objs' as + * 'last' and 'head' are both unsigned int32 + */ + if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) { + head = READ_ONCE(slot->head); + continue; + } + /* obj must be retrieved before moving forward head */ obj = READ_ONCE(slot->entries[head & slot->mask]); diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 34db0b3aa502..4ef31b0bb74d 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -6,6 +6,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <kunit/device.h> #include <kunit/test.h> #include <linux/device.h> #include <linux/kernel.h> @@ -257,25 +258,84 @@ DEFINE_TEST_ARRAY(s64) = { \ _of = check_ ## op ## _overflow(a, b, &_r); \ KUNIT_EXPECT_EQ_MSG(test, _of, of, \ - "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \ + "expected check "fmt" "sym" "fmt" to%s overflow (type %s)\n", \ a, b, of ? "" : " not", #t); \ KUNIT_EXPECT_EQ_MSG(test, _r, r, \ - "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \ + "expected check "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \ a, b, r, _r, #t); \ /* Check for internal macro side-effects. */ \ _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \ - KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \ - KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \ + KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \ + "Unexpected check " #op " macro side-effect!\n"); \ + KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \ + "Unexpected check " #op " macro side-effect!\n"); \ + \ + _r = wrapping_ ## op(t, a, b); \ + KUNIT_EXPECT_TRUE_MSG(test, _r == r, \ + "expected wrap "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \ + a, b, r, _r, #t); \ + /* Check for internal macro side-effects. 
*/ \ + _a_orig = a; \ + _b_orig = b; \ + _r = wrapping_ ## op(t, _a_orig++, _b_orig++); \ + KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \ + "Unexpected wrap " #op " macro side-effect!\n"); \ + KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \ + "Unexpected wrap " #op " macro side-effect!\n"); \ +} while (0) + +static int global_counter; +static void bump_counter(void) +{ + global_counter++; +} + +static int get_index(void) +{ + volatile int index = 0; + bump_counter(); + return index; +} + +#define check_self_op(fmt, op, sym, a, b) do { \ + typeof(a + 0) _a = a; \ + typeof(b + 0) _b = b; \ + typeof(a + 0) _a_sym = a; \ + typeof(a + 0) _a_orig[1] = { a }; \ + typeof(b + 0) _b_orig = b; \ + typeof(b + 0) _b_bump = b + 1; \ + typeof(a + 0) _r; \ + \ + _a_sym sym _b; \ + _r = wrapping_ ## op(_a, _b); \ + KUNIT_EXPECT_TRUE_MSG(test, _r == _a_sym, \ + "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \ + a, b, _a_sym, _r); \ + KUNIT_EXPECT_TRUE_MSG(test, _a == _a_sym, \ + "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \ + a, b, _a_sym, _a); \ + /* Check for internal macro side-effects. */ \ + global_counter = 0; \ + wrapping_ ## op(_a_orig[get_index()], _b_orig++); \ + KUNIT_EXPECT_EQ_MSG(test, global_counter, 1, \ + "Unexpected wrapping_" #op " macro side-effect on arg1!\n"); \ + KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \ + "Unexpected wrapping_" #op " macro side-effect on arg2!\n"); \ } while (0) #define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \ static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \ { \ + /* check_{add,sub,mul}_overflow() and wrapping_{add,sub,mul} */ \ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ + /* wrapping_assign_{add,sub}() */ \ + check_self_op(fmt, assign_add, +=, p->a, p->b); \ + check_self_op(fmt, assign_add, +=, p->b, p->a); \ + check_self_op(fmt, assign_sub, -=, p->a, p->b); \ } \ \ static void n ## _overflow_test(struct kunit *test) { \ @@ -618,7 +678,7 @@ static void overflow_allocation_test(struct kunit *test) } while (0) /* Create dummy device for devm_kmalloc()-family tests. 
*/ - dev = root_device_register(device_name); + dev = kunit_device_register(test, device_name); KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), "Cannot register test device\n"); @@ -634,8 +694,6 @@ static void overflow_allocation_test(struct kunit *test) check_allocation_overflow(devm_kmalloc); check_allocation_overflow(devm_kzalloc); - device_unregister(dev); - kunit_info(test, "%d allocation overflow tests finished\n", count); #undef check_allocation_overflow } @@ -1114,6 +1172,24 @@ static void castable_to_type_test(struct kunit *test) #undef TEST_CASTABLE_TO_TYPE } +struct foo { + int a; + u32 counter; + s16 array[] __counted_by(counter); +}; + +static void DEFINE_FLEX_test(struct kunit *test) +{ + DEFINE_RAW_FLEX(struct foo, two, array, 2); + DEFINE_FLEX(struct foo, eight, array, counter, 8); + DEFINE_FLEX(struct foo, empty, array, counter, 0); + + KUNIT_EXPECT_EQ(test, __struct_size(two), + sizeof(struct foo) + sizeof(s16) + sizeof(s16)); + KUNIT_EXPECT_EQ(test, __struct_size(eight), 24); + KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo)); +} + static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(u8_u8__u8_overflow_test), KUNIT_CASE(s8_s8__s8_overflow_test), @@ -1136,6 +1212,7 @@ static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(overflows_type_test), KUNIT_CASE(same_type_test), KUNIT_CASE(castable_to_type_test), + KUNIT_CASE(DEFINE_FLEX_test), {} }; diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c deleted file mode 100644 index ce39ce9f3526..000000000000 --- a/lib/pci_iomap.c +++ /dev/null @@ -1,180 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Implement the default iomap interfaces - * - * (C) Copyright 2004 Linus Torvalds - */ -#include <linux/pci.h> -#include <linux/io.h> - -#include <linux/export.h> - -#ifdef CONFIG_PCI -/** - * pci_iomap_range - create a virtual mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @offset: map memory at the given offset in BAR - * @maxlen: max length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR from offset to the end, pass %0 here. - * */ -void __iomem *pci_iomap_range(struct pci_dev *dev, - int bar, - unsigned long offset, - unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - if (len <= offset || !start) - return NULL; - len -= offset; - start += offset; - if (maxlen && len > maxlen) - len = maxlen; - if (flags & IORESOURCE_IO) - return __pci_ioport_map(dev, start, len); - if (flags & IORESOURCE_MEM) - return ioremap(start, len); - /* What? */ - return NULL; -} -EXPORT_SYMBOL(pci_iomap_range); - -/** - * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @offset: map memory at the given offset in BAR - * @maxlen: max length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). 
These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. When possible write combining - * is used. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR from offset to the end, pass %0 here. - * */ -void __iomem *pci_iomap_wc_range(struct pci_dev *dev, - int bar, - unsigned long offset, - unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - - if (flags & IORESOURCE_IO) - return NULL; - - if (len <= offset || !start) - return NULL; - - len -= offset; - start += offset; - if (maxlen && len > maxlen) - len = maxlen; - - if (flags & IORESOURCE_MEM) - return ioremap_wc(start, len); - - /* What? */ - return NULL; -} -EXPORT_SYMBOL_GPL(pci_iomap_wc_range); - -/** - * pci_iomap - create a virtual mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @maxlen: length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR without checking for its length first, pass %0 here. - * */ -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) -{ - return pci_iomap_range(dev, bar, 0, maxlen); -} -EXPORT_SYMBOL(pci_iomap); - -/** - * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @maxlen: length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. When possible write combining - * is used. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR without checking for its length first, pass %0 here. - * */ -void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) -{ - return pci_iomap_wc_range(dev, bar, 0, maxlen); -} -EXPORT_SYMBOL_GPL(pci_iomap_wc); - -/* - * pci_iounmap() somewhat illogically comes from lib/iomap.c for the - * CONFIG_GENERIC_IOMAP case, because that's the code that knows about - * the different IOMAP ranges. - * - * But if the architecture does not use the generic iomap code, and if - * it has _not_ defined it's own private pci_iounmap function, we define - * it here. - * - * NOTE! This default implementation assumes that if the architecture - * support ioport mapping (HAS_IOPORT_MAP), the ioport mapping will - * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [, - * and does not need unmapping with 'ioport_unmap()'. - * - * If you have different rules for your architecture, you need to - * implement your own pci_iounmap() that knows the rules for where - * and how IO vs MEM get mapped. - * - * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes - * from legacy <asm-generic/io.h> header file behavior. 
In particular, - * it would seem to make sense to do the iounmap(p) for the non-IO-space - * case here regardless, but that's not what the old header file code - * did. Probably incorrectly, but this is meant to be bug-for-bug - * compatible. - */ -#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP) - -void pci_iounmap(struct pci_dev *dev, void __iomem *p) -{ -#ifdef ARCH_HAS_GENERIC_IOPORT_MAP - uintptr_t start = (uintptr_t) PCI_IOBASE; - uintptr_t addr = (uintptr_t) p; - - if (addr >= start && addr < start + IO_SPACE_LIMIT) - return; - iounmap(p); -#endif -} -EXPORT_SYMBOL(pci_iounmap); - -#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */ - -#endif /* CONFIG_PCI */ diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 1c5420ff254e..385a94aa0b99 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -21,7 +21,7 @@ altivec_flags += -isystem $(shell $(CC) -print-file-name=include) ifdef CONFIG_CC_IS_CLANG # clang ppc port does not yet support -maltivec when -msoft-float is # enabled. A future release of clang will resolve this -# https://bugs.llvm.org/show_bug.cgi?id=31177 +# https://llvm.org/pr31177 CFLAGS_REMOVE_altivec1.o += -msoft-float CFLAGS_REMOVE_altivec2.o += -msoft-float CFLAGS_REMOVE_altivec4.o += -msoft-float diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc index b25dfc9c7759..863e2d320938 100644 --- a/lib/raid6/s390vx.uc +++ b/lib/raid6/s390vx.uc @@ -12,15 +12,14 @@ */ #include <linux/raid/pq.h> -#include <asm/fpu/api.h> -#include <asm/vx-insn.h> +#include <asm/fpu.h> #define NSIZE 16 -static inline void LOAD_CONST(void) +static __always_inline void LOAD_CONST(void) { - asm volatile("VREPIB %v24,7"); - asm volatile("VREPIB %v25,0x1d"); + fpu_vrepib(24, 0x07); + fpu_vrepib(25, 0x1d); } /* @@ -28,10 +27,7 @@ static inline void LOAD_CONST(void) * vector register y left by 1 bit and stores the result in * vector register x. */ -static inline void SHLBYTE(int x, int y) -{ - asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y)); -} +#define SHLBYTE(x, y) fpu_vab(x, y, y) /* * For each of the 16 bytes in the vector register y the MASK() @@ -39,49 +35,17 @@ static inline void SHLBYTE(int x, int y) * or 0x00 if the high bit is 0. The result is stored in vector * register x. 
*/ -static inline void MASK(int x, int y) -{ - asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y)); -} - -static inline void AND(int x, int y, int z) -{ - asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z)); -} - -static inline void XOR(int x, int y, int z) -{ - asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z)); -} +#define MASK(x, y) fpu_vesravb(x, y, 24) -static inline void LOAD_DATA(int x, u8 *ptr) -{ - typedef struct { u8 _[16 * $#]; } addrtype; - register addrtype *__ptr asm("1") = (addrtype *) ptr; - - asm volatile ("VLM %2,%3,0,%1" - : : "m" (*__ptr), "a" (__ptr), "i" (x), - "i" (x + $# - 1)); -} - -static inline void STORE_DATA(int x, u8 *ptr) -{ - typedef struct { u8 _[16 * $#]; } addrtype; - register addrtype *__ptr asm("1") = (addrtype *) ptr; - - asm volatile ("VSTM %2,%3,0,1" - : "=m" (*__ptr) : "a" (__ptr), "i" (x), - "i" (x + $# - 1)); -} - -static inline void COPY_VEC(int x, int y) -{ - asm volatile ("VLR %0,%1" : : "i" (x), "i" (y)); -} +#define AND(x, y, z) fpu_vn(x, y, z) +#define XOR(x, y, z) fpu_vx(x, y, z) +#define LOAD_DATA(x, ptr) fpu_vlm(x, x + $# - 1, ptr) +#define STORE_DATA(x, ptr) fpu_vstm(x, x + $# - 1, ptr) +#define COPY_VEC(x, y) fpu_vlr(x, y) static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs) { - struct kernel_fpu vxstate; + DECLARE_KERNEL_FPU_ONSTACK32(vxstate); u8 **dptr, *p, *q; int d, z, z0; @@ -114,7 +78,7 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs) static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop, size_t bytes, void **ptrs) { - struct kernel_fpu vxstate; + DECLARE_KERNEL_FPU_ONSTACK32(vxstate); u8 **dptr, *p, *q; int d, z, z0; @@ -158,7 +122,7 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop, static int raid6_s390vx$#_valid(void) { - return MACHINE_HAS_VX; + return cpu_has_vx(); } const struct raid6_calls raid6_s390vx$# = { diff --git a/lib/sbitmap.c b/lib/sbitmap.c index d0a5081dfd12..1e453f825c05 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -388,11 +388,6 @@ static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq, unsigned int shallow_depth; /* - * For each batch, we wake up one queue. We need to make sure that our - * batch size is small enough that the full depth of the bitmap, - * potentially limited by a shallow depth, is enough to wake up all of - * the queues. - * * Each full word of the bitmap has bits_per_word bits, and there might * be a partial word. There are depth / bits_per_word full words and * depth % bits_per_word bits left over. 
In bitwise arithmetic: @@ -499,18 +494,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, struct sbitmap_word *map = &sb->map[index]; unsigned long get_mask; unsigned int map_depth = __map_depth(sb, index); + unsigned long val; sbitmap_deferred_clear(map); - if (map->word == (1UL << (map_depth - 1)) - 1) + val = READ_ONCE(map->word); + if (val == (1UL << (map_depth - 1)) - 1) goto next; - nr = find_first_zero_bit(&map->word, map_depth); + nr = find_first_zero_bit(&val, map_depth); if (nr + nr_tags <= map_depth) { atomic_long_t *ptr = (atomic_long_t *) &map->word; - unsigned long val; get_mask = ((1UL << nr_tags) - 1) << nr; - val = READ_ONCE(map->word); while (!atomic_long_try_cmpxchg(ptr, &val, get_mask | val)) ; diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 68b45c82c37a..7bc2220fea80 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter, do { res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max, extraction_flags, &off); - if (res < 0) + if (res <= 0) goto failed; len = res; diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 45c450f423fa..f3f3436d60a9 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -13,16 +13,26 @@ * seq_buf_init() more than once to reset the seq_buf to start * from scratch. */ -#include <linux/uaccess.h> -#include <linux/seq_file.h> + +#include <linux/bug.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/hex.h> +#include <linux/minmax.h> +#include <linux/printk.h> #include <linux/seq_buf.h> +#include <linux/seq_file.h> +#include <linux/sprintf.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/uaccess.h> /** * seq_buf_can_fit - can the new data fit in the current buffer? * @s: the seq_buf descriptor * @len: The length to see if it can fit in the current buffer * - * Returns true if there's enough unused space in the seq_buf buffer + * Returns: true if there's enough unused space in the seq_buf buffer * to fit the amount of new data according to @len. */ static bool seq_buf_can_fit(struct seq_buf *s, size_t len) @@ -35,7 +45,7 @@ static bool seq_buf_can_fit(struct seq_buf *s, size_t len) * @m: the seq_file descriptor that is the destination * @s: the seq_buf descriptor that is the source. * - * Returns zero on success, non zero otherwise + * Returns: zero on success, non-zero otherwise. */ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s) { @@ -50,9 +60,9 @@ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s) * @fmt: printf format string * @args: va_list of arguments from a printf() type function * - * Writes a vnprintf() format into the sequencce buffer. + * Writes a vnprintf() format into the sequence buffer. * - * Returns zero on success, -1 on overflow. + * Returns: zero on success, -1 on overflow. */ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) { @@ -78,7 +88,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) * * Writes a printf() format into the sequence buffer. * - * Returns zero on success, -1 on overflow. + * Returns: zero on success, -1 on overflow. */ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) { @@ -94,12 +104,12 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) 
EXPORT_SYMBOL_GPL(seq_buf_printf); /** - * seq_buf_do_printk - printk seq_buf line by line + * seq_buf_do_printk - printk() seq_buf line by line * @s: seq_buf descriptor * @lvl: printk level * * printk()-s a multi-line sequential buffer line by line. The function - * makes sure that the buffer in @s is nul terminated and safe to read + * makes sure that the buffer in @s is NUL-terminated and safe to read * as a string. */ void seq_buf_do_printk(struct seq_buf *s, const char *lvl) @@ -109,9 +119,7 @@ void seq_buf_do_printk(struct seq_buf *s, const char *lvl) if (s->size == 0 || s->len == 0) return; - seq_buf_terminate(s); - - start = s->buffer; + start = seq_buf_str(s); while ((lf = strchr(start, '\n'))) { int len = lf - start + 1; @@ -141,7 +149,7 @@ EXPORT_SYMBOL_GPL(seq_buf_do_printk); * This function will take the format and the binary array and finish * the conversion into the ASCII string within the buffer. * - * Returns zero on success, -1 on overflow. + * Returns: zero on success, -1 on overflow. */ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) { @@ -169,7 +177,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) * * Copy a simple string into the sequence buffer. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_puts(struct seq_buf *s, const char *str) { @@ -189,6 +197,7 @@ int seq_buf_puts(struct seq_buf *s, const char *str) seq_buf_set_overflow(s); return -1; } +EXPORT_SYMBOL_GPL(seq_buf_puts); /** * seq_buf_putc - sequence printing of simple character @@ -197,7 +206,7 @@ int seq_buf_puts(struct seq_buf *s, const char *str) * * Copy a single character into the sequence buffer. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_putc(struct seq_buf *s, unsigned char c) { @@ -210,9 +219,10 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c) seq_buf_set_overflow(s); return -1; } +EXPORT_SYMBOL_GPL(seq_buf_putc); /** - * seq_buf_putmem - write raw data into the sequenc buffer + * seq_buf_putmem - write raw data into the sequence buffer * @s: seq_buf descriptor * @mem: The raw memory to copy into the buffer * @len: The length of the raw memory to copy (in bytes) @@ -221,7 +231,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c) * buffer and a strcpy() would not work. Using this function allows * for such cases. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len) { @@ -249,7 +259,7 @@ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len) * raw memory into the buffer it writes its ASCII representation of it * in hex characters. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, unsigned int len) @@ -297,7 +307,7 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, * * Write a path name into the sequence buffer. * - * Returns the number of written bytes on success, -1 on overflow + * Returns: the number of written bytes on success, -1 on overflow. 
*/ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc) { @@ -324,23 +334,25 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc) * seq_buf_to_user - copy the sequence buffer to user space * @s: seq_buf descriptor * @ubuf: The userspace memory location to copy to + * @start: The first byte in the buffer to copy * @cnt: The amount to copy * * Copies the sequence buffer into the userspace memory pointed to - * by @ubuf. It starts from the last read position (@s->readpos) - * and writes up to @cnt characters or till it reaches the end of - * the content in the buffer (@s->len), which ever comes first. + * by @ubuf. It starts from @start and writes up to @cnt characters + * or until it reaches the end of the content in the buffer (@s->len), + * whichever comes first. * + * Returns: * On success, it returns a positive number of the number of bytes * it copied. * * On failure it returns -EBUSY if all of the content in the * sequence has been already read, which includes nothing in the - * sequence (@s->len == @s->readpos). + * sequence (@s->len == @start). * * Returns -EFAULT if the copy to userspace fails. */ -int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) +int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt) { int len; int ret; @@ -350,20 +362,17 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) len = seq_buf_used(s); - if (len <= s->readpos) + if (len <= start) return -EBUSY; - len -= s->readpos; + len -= start; if (cnt > len) cnt = len; - ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); + ret = copy_to_user(ubuf, s->buffer + start, cnt); if (ret == cnt) return -EFAULT; - cnt -= ret; - - s->readpos += cnt; - return cnt; + return cnt - ret; } /** @@ -384,11 +393,11 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) * linebuf size is maximal length for one line. * 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for * separating space - * 2 - spaces separating hex dump and ascii representation - * 32 - ascii representation + * 2 - spaces separating hex dump and ASCII representation + * 32 - ASCII representation * 1 - terminating '\0' * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type, int rowsize, int groupsize, diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c index d4a3730b08fa..4ce960438806 100644 --- a/lib/slub_kunit.c +++ b/lib/slub_kunit.c @@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test) ptr_addr = (unsigned long *)(p + s->offset); tmp = *ptr_addr; - p[s->offset] = 0x12; + p[s->offset] = ~p[s->offset]; /* * Expecting three errors. 
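With the seq_buf_to_user() change above, seq_buf no longer advances an internal read position; the caller passes the starting offset itself. Below is a minimal sketch of an adapted caller, assuming a read() handler that keeps its position in *ppos and stashes the seq_buf in file->private_data (both are illustrative assumptions, not taken from this diff).

#include <linux/fs.h>
#include <linux/seq_buf.h>

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	struct seq_buf *s = file->private_data;
	int ret;

	/* The caller, not seq_buf, now remembers how far it has read. */
	ret = seq_buf_to_user(s, ubuf, *ppos, count);
	if (ret == -EBUSY)
		return 0;	/* everything was consumed by earlier reads */
	if (ret > 0)
		*ppos += ret;

	return ret;
}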
diff --git a/lib/sort.c b/lib/sort.c index b399bf10d675..a0509088f82a 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -215,6 +215,7 @@ void sort_r(void *base, size_t num, size_t size, /* pre-scale counters for performance */ size_t n = num * size, a = (num/2) * size; const unsigned int lsbit = size & -size; /* Used to find parent */ + size_t shift = 0; if (!a) /* num < 2 || size == 0 */ return; @@ -242,12 +243,21 @@ void sort_r(void *base, size_t num, size_t size, for (;;) { size_t b, c, d; - if (a) /* Building heap: sift down --a */ - a -= size; - else if (n -= size) /* Sorting: Extract root to --n */ + if (a) /* Building heap: sift down a */ + a -= size << shift; + else if (n > 3 * size) { /* Sorting: Extract two largest elements */ + n -= size; do_swap(base, base + n, size, swap_func, priv); - else /* Sort complete */ + shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0; + a = size << shift; + n -= size; + do_swap(base + a, base + n, size, swap_func, priv); + } else if (n > size) { /* Sorting: Extract root */ + n -= size; + do_swap(base, base + n, size, swap_func, priv); + } else { /* Sort complete */ break; + } /* * Sift element at "a" down into heap. This is the @@ -262,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size, * average, 3/4 worst-case.) */ for (b = a; c = 2*b + size, (d = c + size) < n;) - b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d; + b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d; if (d == n) /* Special case last leaf with no sibling */ b = c; diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 2f5aa851834e..cd8f23455285 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -14,15 +14,21 @@ #define pr_fmt(fmt) "stackdepot: " fmt +#include <linux/debugfs.h> #include <linux/gfp.h> #include <linux/jhash.h> #include <linux/kernel.h> #include <linux/kmsan.h> +#include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> -#include <linux/percpu.h> +#include <linux/poison.h> #include <linux/printk.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/refcount.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/stacktrace.h> #include <linux/stackdepot.h> #include <linux/string.h> @@ -30,38 +36,11 @@ #include <linux/memblock.h> #include <linux/kasan-enabled.h> -#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8) - -#define DEPOT_VALID_BITS 1 -#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */ -#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER)) -#define DEPOT_STACK_ALIGN 4 -#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN) -#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \ - DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS) #define DEPOT_POOLS_CAP 8192 +/* The pool_index is offset by 1 so the first record does not have a 0 handle. */ #define DEPOT_MAX_POOLS \ - (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \ - (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP) - -/* Compact structure that stores a reference to a stack. 
*/ -union handle_parts { - depot_stack_handle_t handle; - struct { - u32 pool_index : DEPOT_POOL_INDEX_BITS; - u32 offset : DEPOT_OFFSET_BITS; - u32 valid : DEPOT_VALID_BITS; - u32 extra : STACK_DEPOT_EXTRA_BITS; - }; -}; - -struct stack_record { - struct stack_record *next; /* Link in the hash table */ - u32 hash; /* Hash in the hash table */ - u32 size; /* Number of stored frames */ - union handle_parts handle; - unsigned long entries[]; /* Variable-sized array of frames */ -}; + (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \ + (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP) static bool stack_depot_disabled; static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); @@ -75,40 +54,50 @@ static bool __stack_depot_early_init_passed __initdata; /* Initial seed for jhash2. */ #define STACK_HASH_SEED 0x9747b28c -/* Hash table of pointers to stored stack traces. */ -static struct stack_record **stack_table; +/* Hash table of stored stack records. */ +static struct list_head *stack_table; /* Fixed order of the number of table buckets. Used when KASAN is enabled. */ static unsigned int stack_bucket_number_order; /* Hash mask for indexing the table. */ static unsigned int stack_hash_mask; -/* Array of memory regions that store stack traces. */ +/* Array of memory regions that store stack records. */ static void *stack_pools[DEPOT_MAX_POOLS]; -/* Currently used pool in stack_pools. */ -static int pool_index; +/* Newly allocated pool that is not yet added to stack_pools. */ +static void *new_pool; +/* Number of pools in stack_pools. */ +static int pools_num; /* Offset to the unused space in the currently used pool. */ -static size_t pool_offset; -/* Lock that protects the variables above. */ +static size_t pool_offset = DEPOT_POOL_SIZE; +/* Freelist of stack records within stack_pools. */ +static LIST_HEAD(free_stacks); +/* The lock must be held when performing pool or freelist modifications. */ static DEFINE_RAW_SPINLOCK(pool_lock); -/* - * Stack depot tries to keep an extra pool allocated even before it runs out - * of space in the currently used pool. - * This flag marks that this next extra pool needs to be allocated and - * initialized. It has the value 0 when either the next pool is not yet - * initialized or the limit on the number of pools is reached. - */ -static int next_pool_required = 1; + +/* Statistics counters for debugfs. 
*/ +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS, + DEPOT_COUNTER_REFD_FREES, + DEPOT_COUNTER_REFD_INUSE, + DEPOT_COUNTER_FREELIST_SIZE, + DEPOT_COUNTER_PERSIST_COUNT, + DEPOT_COUNTER_PERSIST_BYTES, + DEPOT_COUNTER_COUNT, +}; +static long counters[DEPOT_COUNTER_COUNT]; +static const char *const counter_names[] = { + [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations", + [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees", + [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use", + [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size", + [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count", + [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes", +}; +static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT); static int __init disable_stack_depot(char *str) { - int ret; - - ret = kstrtobool(str, &stack_depot_disabled); - if (!ret && stack_depot_disabled) { - pr_info("disabled\n"); - stack_table = NULL; - } - return 0; + return kstrtobool(str, &stack_depot_disabled); } early_param("stack_depot_disable", disable_stack_depot); @@ -120,6 +109,15 @@ void __init stack_depot_request_early_init(void) __stack_depot_early_init_requested = true; } +/* Initialize list_head's within the hash table. */ +static void init_stack_table(unsigned long entries) +{ + unsigned long i; + + for (i = 0; i < entries; i++) + INIT_LIST_HEAD(&stack_table[i]); +} + /* Allocates a hash table via memblock. Can only be used during early boot. */ int __init stack_depot_early_init(void) { @@ -131,6 +129,15 @@ int __init stack_depot_early_init(void) __stack_depot_early_init_passed = true; /* + * Print disabled message even if early init has not been requested: + * stack_depot_init() will not print one. + */ + if (stack_depot_disabled) { + pr_info("disabled\n"); + return 0; + } + + /* * If KASAN is enabled, use the maximum order: KASAN is frequently used * in fuzzing scenarios, which leads to a large number of different * stack traces being stored in stack depot. @@ -138,21 +145,25 @@ int __init stack_depot_early_init(void) if (kasan_enabled() && !stack_bucket_number_order) stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX; - if (!__stack_depot_early_init_requested || stack_depot_disabled) + /* + * Check if early init has been requested after setting + * stack_bucket_number_order: stack_depot_init() uses its value. + */ + if (!__stack_depot_early_init_requested) return 0; /* * If stack_bucket_number_order is not set, leave entries as 0 to rely - * on the automatic calculations performed by alloc_large_system_hash. + * on the automatic calculations performed by alloc_large_system_hash(). */ if (stack_bucket_number_order) entries = 1UL << stack_bucket_number_order; pr_info("allocating hash table via alloc_large_system_hash\n"); stack_table = alloc_large_system_hash("stackdepot", - sizeof(struct stack_record *), + sizeof(struct list_head), entries, STACK_HASH_TABLE_SCALE, - HASH_EARLY | HASH_ZERO, + HASH_EARLY, NULL, &stack_hash_mask, 1UL << STACK_BUCKET_NUMBER_ORDER_MIN, @@ -162,6 +173,14 @@ int __init stack_depot_early_init(void) stack_depot_disabled = true; return -ENOMEM; } + if (!entries) { + /* + * Obtain the number of entries that was calculated by + * alloc_large_system_hash(). 
+ */ + entries = stack_hash_mask + 1; + } + init_stack_table(entries); return 0; } @@ -202,7 +221,7 @@ int stack_depot_init(void) entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX; pr_info("allocating hash table of %lu entries via kvcalloc\n", entries); - stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL); + stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL); if (!stack_table) { pr_err("hash table allocation failed, disabling\n"); stack_depot_disabled = true; @@ -210,6 +229,7 @@ int stack_depot_init(void) goto out_unlock; } stack_hash_mask = entries - 1; + init_stack_table(entries); out_unlock: mutex_unlock(&stack_depot_init_mutex); @@ -218,104 +238,276 @@ out_unlock: } EXPORT_SYMBOL_GPL(stack_depot_init); -/* Uses preallocated memory to initialize a new stack depot pool. */ -static void depot_init_pool(void **prealloc) +/* + * Initializes new stack pool, and updates the list of pools. + */ +static bool depot_init_pool(void **prealloc) { + lockdep_assert_held(&pool_lock); + + if (unlikely(pools_num >= DEPOT_MAX_POOLS)) { + /* Bail out if we reached the pool limit. */ + WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */ + WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */ + WARN_ONCE(1, "Stack depot reached limit capacity"); + return false; + } + + if (!new_pool && *prealloc) { + /* We have preallocated memory, use it. */ + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; + } + + if (!new_pool) + return false; /* new_pool and *prealloc are NULL */ + + /* Save reference to the pool to be used by depot_fetch_stack(). */ + stack_pools[pools_num] = new_pool; + /* - * If the next pool is already initialized or the maximum number of + * Stack depot tries to keep an extra pool allocated even before it runs + * out of space in the currently used pool. + * + * To indicate that a new preallocation is needed new_pool is reset to + * NULL; do not reset to NULL if we have reached the maximum number of + * pools. + */ + if (pools_num < DEPOT_MAX_POOLS) + WRITE_ONCE(new_pool, NULL); + else + WRITE_ONCE(new_pool, STACK_DEPOT_POISON); + + /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */ + WRITE_ONCE(pools_num, pools_num + 1); + ASSERT_EXCLUSIVE_WRITER(pools_num); + + pool_offset = 0; + + return true; +} + +/* Keeps the preallocated memory to be used for a new stack depot pool. */ +static void depot_keep_new_pool(void **prealloc) +{ + lockdep_assert_held(&pool_lock); + + /* + * If a new pool is already saved or the maximum number of * pools is reached, do not use the preallocated memory. - * smp_load_acquire() here pairs with smp_store_release() below and - * in depot_alloc_stack(). */ - if (!smp_load_acquire(&next_pool_required)) + if (new_pool) return; - /* Check if the current pool is not yet allocated. */ - if (stack_pools[pool_index] == NULL) { - /* Use the preallocated memory for the current pool. */ - stack_pools[pool_index] = *prealloc; - *prealloc = NULL; - } else { - /* - * Otherwise, use the preallocated memory for the next pool - * as long as we do not exceed the maximum number of pools. - */ - if (pool_index + 1 < DEPOT_MAX_POOLS) { - stack_pools[pool_index + 1] = *prealloc; - *prealloc = NULL; - } - /* - * At this point, either the next pool is initialized or the - * maximum number of pools is reached. In either case, take - * note that initializing another pool is not required. - * This smp_store_release pairs with smp_load_acquire() above - * and in stack_depot_save(). 
- */ - smp_store_release(&next_pool_required, 0); + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; +} + +/* + * Try to initialize a new stack record from the current pool, a cached pool, or + * the current pre-allocation. + */ +static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size) +{ + struct stack_record *stack; + void *current_pool; + u32 pool_index; + + lockdep_assert_held(&pool_lock); + + if (pool_offset + size > DEPOT_POOL_SIZE) { + if (!depot_init_pool(prealloc)) + return NULL; } + + if (WARN_ON_ONCE(pools_num < 1)) + return NULL; + pool_index = pools_num - 1; + current_pool = stack_pools[pool_index]; + if (WARN_ON_ONCE(!current_pool)) + return NULL; + + stack = current_pool + pool_offset; + + /* Pre-initialize handle once. */ + stack->handle.pool_index_plus_1 = pool_index + 1; + stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; + stack->handle.extra = 0; + INIT_LIST_HEAD(&stack->hash_list); + + pool_offset += size; + + return stack; +} + +/* Try to find next free usable entry from the freelist. */ +static struct stack_record *depot_pop_free(void) +{ + struct stack_record *stack; + + lockdep_assert_held(&pool_lock); + + if (list_empty(&free_stacks)) + return NULL; + + /* + * We maintain the invariant that the elements in front are least + * recently used, and are therefore more likely to be associated with an + * RCU grace period in the past. Consequently it is sufficient to only + * check the first entry. + */ + stack = list_first_entry(&free_stacks, struct stack_record, free_list); + if (!poll_state_synchronize_rcu(stack->rcu_state)) + return NULL; + + list_del(&stack->free_list); + counters[DEPOT_COUNTER_FREELIST_SIZE]--; + + return stack; +} + +static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries) +{ + const size_t used = flex_array_size(s, entries, nr_entries); + const size_t unused = sizeof(s->entries) - used; + + WARN_ON_ONCE(sizeof(s->entries) < used); + + return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN); } /* Allocates a new stack in a stack depot pool. */ static struct stack_record * -depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc) +depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc) { - struct stack_record *stack; - size_t required_size = struct_size(stack, entries, size); + struct stack_record *stack = NULL; + size_t record_size; - required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN); + lockdep_assert_held(&pool_lock); - /* Check if there is not enough space in the current pool. */ - if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) { - /* Bail out if we reached the pool limit. */ - if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) { - WARN_ONCE(1, "Stack depot reached limit capacity"); - return NULL; - } + /* This should already be checked by public API entry points. */ + if (WARN_ON_ONCE(!nr_entries)) + return NULL; + /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */ + if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES) + nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES; + + if (flags & STACK_DEPOT_FLAG_GET) { /* - * Move on to the next pool. - * WRITE_ONCE pairs with potential concurrent read in - * stack_depot_fetch(). - */ - WRITE_ONCE(pool_index, pool_index + 1); - pool_offset = 0; - /* - * If the maximum number of pools is not reached, take note - * that the next pool needs to initialized. 
- * smp_store_release() here pairs with smp_load_acquire() in - * stack_depot_save() and depot_init_pool(). + * Evictable entries have to allocate the max. size so they may + * safely be re-used by differently sized allocations. */ - if (pool_index + 1 < DEPOT_MAX_POOLS) - smp_store_release(&next_pool_required, 1); + record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES); + stack = depot_pop_free(); + } else { + record_size = depot_stack_record_size(stack, nr_entries); } - /* Assign the preallocated memory to a pool if required. */ - if (*prealloc) - depot_init_pool(prealloc); - - /* Check if we have a pool to save the stack trace. */ - if (stack_pools[pool_index] == NULL) - return NULL; + if (!stack) { + stack = depot_pop_free_pool(prealloc, record_size); + if (!stack) + return NULL; + } /* Save the stack trace. */ - stack = stack_pools[pool_index] + pool_offset; stack->hash = hash; - stack->size = size; - stack->handle.pool_index = pool_index; - stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; - stack->handle.valid = 1; - stack->handle.extra = 0; - memcpy(stack->entries, entries, flex_array_size(stack, entries, size)); - pool_offset += required_size; + stack->size = nr_entries; + /* stack->handle is already filled in by depot_pop_free_pool(). */ + memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries)); + + if (flags & STACK_DEPOT_FLAG_GET) { + refcount_set(&stack->count, 1); + counters[DEPOT_COUNTER_REFD_ALLOCS]++; + counters[DEPOT_COUNTER_REFD_INUSE]++; + } else { + /* Warn on attempts to switch to refcounting this entry. */ + refcount_set(&stack->count, REFCOUNT_SATURATED); + counters[DEPOT_COUNTER_PERSIST_COUNT]++; + counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size; + } + /* * Let KMSAN know the stored stack record is initialized. This shall * prevent false positive reports if instrumented code accesses it. */ - kmsan_unpoison_memory(stack, required_size); + kmsan_unpoison_memory(stack, record_size); return stack; } +static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle) +{ + const int pools_num_cached = READ_ONCE(pools_num); + union handle_parts parts = { .handle = handle }; + void *pool; + u32 pool_index = parts.pool_index_plus_1 - 1; + size_t offset = parts.offset << DEPOT_STACK_ALIGN; + struct stack_record *stack; + + lockdep_assert_not_held(&pool_lock); + + if (pool_index >= pools_num_cached) { + WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", + pool_index, pools_num_cached, handle); + return NULL; + } + + pool = stack_pools[pool_index]; + if (WARN_ON(!pool)) + return NULL; + + stack = pool + offset; + if (WARN_ON(!refcount_read(&stack->count))) + return NULL; + + return stack; +} + +/* Links stack into the freelist. */ +static void depot_free_stack(struct stack_record *stack) +{ + unsigned long flags; + + lockdep_assert_not_held(&pool_lock); + + raw_spin_lock_irqsave(&pool_lock, flags); + printk_deferred_enter(); + + /* + * Remove the entry from the hash list. Concurrent list traversal may + * still observe the entry, but since the refcount is zero, this entry + * will no longer be considered as valid. + */ + list_del_rcu(&stack->hash_list); + + /* + * Due to being used from constrained contexts such as the allocators, + * NMI, or even RCU itself, stack depot cannot rely on primitives that + * would sleep (such as synchronize_rcu()) or recursively call into + * stack depot again (such as call_rcu()). 
+ * + * Instead, get an RCU cookie, so that we can ensure this entry isn't + * moved onto another list until the next grace period, and concurrent + * RCU list traversal remains safe. + */ + stack->rcu_state = get_state_synchronize_rcu(); + + /* + * Add the entry to the freelist tail, so that older entries are + * considered first - their RCU cookie is more likely to no longer be + * associated with the current grace period. + */ + list_add_tail(&stack->free_list, &free_stacks); + + counters[DEPOT_COUNTER_FREELIST_SIZE]++; + counters[DEPOT_COUNTER_REFD_FREES]++; + counters[DEPOT_COUNTER_REFD_INUSE]--; + + printk_deferred_exit(); + raw_spin_unlock_irqrestore(&pool_lock, flags); +} + /* Calculates the hash for a stack. */ static inline u32 hash_stack(unsigned long *entries, unsigned int size) { @@ -340,32 +532,72 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2, } /* Finds a stack in a bucket of the hash table. */ -static inline struct stack_record *find_stack(struct stack_record *bucket, - unsigned long *entries, int size, - u32 hash) +static inline struct stack_record *find_stack(struct list_head *bucket, + unsigned long *entries, int size, + u32 hash, depot_flags_t flags) { - struct stack_record *found; + struct stack_record *stack, *ret = NULL; - for (found = bucket; found; found = found->next) { - if (found->hash == hash && - found->size == size && - !stackdepot_memcmp(entries, found->entries, size)) - return found; + /* + * Stack depot may be used from instrumentation that instruments RCU or + * tracing itself; use variant that does not call into RCU and cannot be + * traced. + * + * Note: Such use cases must take care when using refcounting to evict + * unused entries, because the stack record free-then-reuse code paths + * do call into RCU. + */ + rcu_read_lock_sched_notrace(); + + list_for_each_entry_rcu(stack, bucket, hash_list) { + if (stack->hash != hash || stack->size != size) + continue; + + /* + * This may race with depot_free_stack() accessing the freelist + * management state unioned with @entries. The refcount is zero + * in that case and the below refcount_inc_not_zero() will fail. + */ + if (data_race(stackdepot_memcmp(entries, stack->entries, size))) + continue; + + /* + * Try to increment refcount. If this succeeds, the stack record + * is valid and has not yet been freed. + * + * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior + * to then call stack_depot_put() later, and we can assume that + * a stack record is never placed back on the freelist. 
+ */ + if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count)) + continue; + + ret = stack; + break; } - return NULL; + + rcu_read_unlock_sched_notrace(); + + return ret; } -depot_stack_handle_t __stack_depot_save(unsigned long *entries, - unsigned int nr_entries, - gfp_t alloc_flags, bool can_alloc) +depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, + unsigned int nr_entries, + gfp_t alloc_flags, + depot_flags_t depot_flags) { - struct stack_record *found = NULL, **bucket; - union handle_parts retval = { .handle = 0 }; + struct list_head *bucket; + struct stack_record *found = NULL; + depot_stack_handle_t handle = 0; struct page *page = NULL; void *prealloc = NULL; + bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC; unsigned long flags; u32 hash; + if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK)) + return 0; + /* * If this stack trace is from an interrupt, including anything before * interrupt entry usually leads to unbounded stack depot growth. @@ -377,35 +609,28 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, nr_entries = filter_irq_stacks(entries, nr_entries); if (unlikely(nr_entries == 0) || stack_depot_disabled) - goto fast_exit; + return 0; hash = hash_stack(entries, nr_entries); bucket = &stack_table[hash & stack_hash_mask]; - /* - * Fast path: look the stack trace up without locking. - * The smp_load_acquire() here pairs with smp_store_release() to - * |bucket| below. - */ - found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash); + /* Fast path: look the stack trace up without locking. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); if (found) goto exit; /* - * Check if another stack pool needs to be initialized. If so, allocate - * the memory now - we won't be able to do that under the lock. - * - * The smp_load_acquire() here pairs with smp_store_release() to - * |next_pool_inited| in depot_alloc_stack() and depot_init_pool(). + * Allocate memory for a new pool if required now: + * we won't be able to do that under the lock. */ - if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) { + if (unlikely(can_alloc && !READ_ONCE(new_pool))) { /* * Zero out zone modifiers, as we don't have specific zone * requirements. Keep the flags related to allocation in atomic - * contexts and I/O. + * contexts, I/O, nolockdep. */ alloc_flags &= ~GFP_ZONEMASK; - alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); + alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP); alloc_flags |= __GFP_NOWARN; page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER); if (page) @@ -413,29 +638,34 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, } raw_spin_lock_irqsave(&pool_lock, flags); + printk_deferred_enter(); - found = find_stack(*bucket, entries, nr_entries, hash); + /* Try to find again, to avoid concurrently inserting duplicates. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); if (!found) { struct stack_record *new = - depot_alloc_stack(entries, nr_entries, hash, &prealloc); + depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc); if (new) { - new->next = *bucket; /* - * This smp_store_release() pairs with - * smp_load_acquire() from |bucket| above. + * This releases the stack record into the bucket and + * makes it visible to readers in find_stack(). 
*/ - smp_store_release(bucket, new); + list_add_rcu(&new->hash_list, bucket); found = new; } - } else if (prealloc) { + } + + if (prealloc) { /* - * Stack depot already contains this stack trace, but let's - * keep the preallocated memory for the future. + * Either stack depot already contains this stack trace, or + * depot_alloc_stack() did not consume the preallocated memory. + * Try to keep the preallocated memory for future. */ - depot_init_pool(&prealloc); + depot_keep_new_pool(&prealloc); } + printk_deferred_exit(); raw_spin_unlock_irqrestore(&pool_lock, flags); exit: if (prealloc) { @@ -443,31 +673,31 @@ exit: free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER); } if (found) - retval.handle = found->handle.handle; -fast_exit: - return retval.handle; + handle = found->handle.handle; + return handle; } -EXPORT_SYMBOL_GPL(__stack_depot_save); +EXPORT_SYMBOL_GPL(stack_depot_save_flags); depot_stack_handle_t stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags) { - return __stack_depot_save(entries, nr_entries, alloc_flags, true); + return stack_depot_save_flags(entries, nr_entries, alloc_flags, + STACK_DEPOT_FLAG_CAN_ALLOC); } EXPORT_SYMBOL_GPL(stack_depot_save); +struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle) +{ + if (!handle) + return NULL; + + return depot_fetch_stack(handle); +} + unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries) { - union handle_parts parts = { .handle = handle }; - /* - * READ_ONCE pairs with potential concurrent write in - * depot_alloc_stack. - */ - int pool_index_cached = READ_ONCE(pool_index); - void *pool; - size_t offset = parts.offset << DEPOT_STACK_ALIGN; struct stack_record *stack; *entries = NULL; @@ -477,24 +707,42 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, */ kmsan_unpoison_memory(entries, sizeof(*entries)); - if (!handle) + if (!handle || stack_depot_disabled) return 0; - if (parts.pool_index > pool_index_cached) { - WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", - parts.pool_index, pool_index_cached, handle); - return 0; - } - pool = stack_pools[parts.pool_index]; - if (!pool) + stack = depot_fetch_stack(handle); + /* + * Should never be NULL, otherwise this is a use-after-put (or just a + * corrupt handle). + */ + if (WARN(!stack, "corrupt handle or use after stack_depot_put()")) return 0; - stack = pool + offset; *entries = stack->entries; return stack->size; } EXPORT_SYMBOL_GPL(stack_depot_fetch); +void stack_depot_put(depot_stack_handle_t handle) +{ + struct stack_record *stack; + + if (!handle || stack_depot_disabled) + return; + + stack = depot_fetch_stack(handle); + /* + * Should always be able to find the stack record, otherwise this is an + * unbalanced put attempt (or corrupt handle). + */ + if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()")) + return; + + if (refcount_dec_and_test(&stack->count)) + depot_free_stack(stack); +} +EXPORT_SYMBOL_GPL(stack_depot_put); + void stack_depot_print(depot_stack_handle_t stack) { unsigned long *entries; @@ -539,3 +787,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle) return parts.extra; } EXPORT_SYMBOL(stack_depot_get_extra_bits); + +static int stats_show(struct seq_file *seq, void *v) +{ + /* + * data race ok: These are just statistics counters, and approximate + * statistics are ok for debugging. 
+ */ + seq_printf(seq, "pools: %d\n", data_race(pools_num)); + for (int i = 0; i < DEPOT_COUNTER_COUNT; i++) + seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i])); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stats); + +static int depot_debugfs_init(void) +{ + struct dentry *dir; + + if (stack_depot_disabled) + return 0; + + dir = debugfs_create_dir("stackdepot", NULL); + debugfs_create_file("stats", 0444, dir, NULL, &stats_fops); + return 0; +} +late_initcall(depot_debugfs_init); diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c index 05947a2feb93..3bc14d1ee816 100644 --- a/lib/stackinit_kunit.c +++ b/lib/stackinit_kunit.c @@ -63,7 +63,16 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size, #define FETCH_ARG_STRING(var) var #define FETCH_ARG_STRUCT(var) &var +/* + * On m68k, if the leaf function test variable is longer than 8 bytes, + * the start of the stack frame moves. 8 is sufficiently large to + * test m68k char arrays, but leave it at 16 for other architectures. + */ +#ifdef CONFIG_M68K +#define FILL_SIZE_STRING 8 +#else #define FILL_SIZE_STRING 16 +#endif #define INIT_CLONE_SCALAR /**/ #define INIT_CLONE_STRING [FILL_SIZE_STRING] @@ -165,19 +174,23 @@ static noinline void test_ ## name (struct kunit *test) \ /* Verify all bytes overwritten with 0xFF. */ \ for (sum = 0, i = 0; i < target_size; i++) \ sum += (check_buf[i] != 0xFF); \ - KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ - "leaf fill was not 0xFF!?\n"); \ /* Clear entire check buffer for later bit tests. */ \ memset(check_buf, 0x00, sizeof(check_buf)); \ /* Extract stack-defined variable contents. */ \ ignored = leaf_ ##name((unsigned long)&ignored, 0, \ FETCH_ARG_ ## which(zero)); \ + /* \ + * Delay the sum test to here to do as little as \ + * possible between the two leaf function calls. \ + */ \ + KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ + "leaf fill was not 0xFF!?\n"); \ \ /* Validate that compiler lined up fill and target. */ \ KUNIT_ASSERT_TRUE_MSG(test, \ stackinit_range_contains(fill_start, fill_size, \ target_start, target_size), \ - "stack fill missed target!? " \ + "stackframe was not the same between calls!? " \ "(fill %zu wide, target offset by %d)\n", \ fill_size, \ (int)((ssize_t)(uintptr_t)fill_start - \ @@ -404,7 +417,7 @@ static noinline int leaf_switch_2_none(unsigned long sp, bool fill, * These are expected to fail for most configurations because neither * GCC nor Clang have a way to perform initialization of variables in * non-code areas (i.e. in a switch statement before the first "case"). - * https://bugs.llvm.org/show_bug.cgi?id=44916 + * https://llvm.org/pr44916 */ DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL); DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL); diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c deleted file mode 100644 index e21be95514af..000000000000 --- a/lib/strcat_kunit.c +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Kernel module for testing 'strcat' family of functions. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <kunit/test.h> -#include <linux/string.h> - -static volatile int unconst; - -static void strcat_test(struct kunit *test) -{ - char dest[8]; - - /* Destination is terminated. */ - memset(dest, 0, sizeof(dest)); - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy does nothing. */ - KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* 4 characters copied in, stops at %NUL. 
*/ - KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); - KUNIT_EXPECT_STREQ(test, dest, "four"); - KUNIT_EXPECT_EQ(test, dest[5], '\0'); - /* 2 more characters copied in okay. */ - KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); -} - -static void strncat_test(struct kunit *test) -{ - char dest[8]; - - /* Destination is terminated. */ - memset(dest, 0, sizeof(dest)); - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy of size 0 does nothing. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Empty copy of size 1 does nothing too. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Copy of max 0 characters should do nothing. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - - /* 4 characters copied in, even if max is 8. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, "four"); - KUNIT_EXPECT_EQ(test, dest[5], '\0'); - KUNIT_EXPECT_EQ(test, dest[6], '\0'); - /* 2 characters copied in okay, 2 ignored. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); -} - -static void strlcat_test(struct kunit *test) -{ - char dest[8] = ""; - int len = sizeof(dest) + unconst; - - /* Destination is terminated. */ - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy is size 0. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Size 1 should keep buffer terminated, report size of source only. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); - KUNIT_EXPECT_STREQ(test, dest, ""); - - /* 4 characters copied in. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); - KUNIT_EXPECT_STREQ(test, dest, "four"); - /* 2 characters copied in okay, gets to 6 total. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); - /* 2 characters ignored if max size (7) reached. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); - /* 1 of 2 characters skipped, now at true max size. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); - KUNIT_EXPECT_STREQ(test, dest, "fourABE"); - /* Everything else ignored, now at full size. 
*/ - KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); - KUNIT_EXPECT_STREQ(test, dest, "fourABE"); -} - -static struct kunit_case strcat_test_cases[] = { - KUNIT_CASE(strcat_test), - KUNIT_CASE(strncat_test), - KUNIT_CASE(strlcat_test), - {} -}; - -static struct kunit_suite strcat_test_suite = { - .name = "strcat", - .test_cases = strcat_test_cases, -}; - -kunit_test_suite(strcat_test_suite); - -MODULE_LICENSE("GPL"); diff --git a/lib/string.c b/lib/string.c index be26623953d2..966da44bfc86 100644 --- a/lib/string.c +++ b/lib/string.c @@ -15,19 +15,20 @@ */ #define __NO_FORTIFY -#include <linux/types.h> -#include <linux/string.h> -#include <linux/ctype.h> -#include <linux/kernel.h> -#include <linux/export.h> +#include <linux/bits.h> #include <linux/bug.h> +#include <linux/ctype.h> #include <linux/errno.h> -#include <linux/slab.h> +#include <linux/limits.h> +#include <linux/linkage.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/types.h> +#include <asm/page.h> +#include <asm/rwonce.h> #include <asm/unaligned.h> -#include <asm/byteorder.h> #include <asm/word-at-a-time.h> -#include <asm/page.h> #ifndef __HAVE_ARCH_STRNCASECMP /** @@ -103,23 +104,7 @@ char *strncpy(char *dest, const char *src, size_t count) EXPORT_SYMBOL(strncpy); #endif -#ifndef __HAVE_ARCH_STRLCPY -size_t strlcpy(char *dest, const char *src, size_t size) -{ - size_t ret = strlen(src); - - if (size) { - size_t len = (ret >= size) ? size - 1 : ret; - __builtin_memcpy(dest, src, len); - dest[len] = '\0'; - } - return ret; -} -EXPORT_SYMBOL(strlcpy); -#endif - -#ifndef __HAVE_ARCH_STRSCPY -ssize_t strscpy(char *dest, const char *src, size_t count) +ssize_t sized_strscpy(char *dest, const char *src, size_t count) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; size_t max = count; @@ -185,8 +170,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count) return -E2BIG; } -EXPORT_SYMBOL(strscpy); -#endif +EXPORT_SYMBOL(sized_strscpy); /** * stpcpy - copy a string from src to dest returning a pointer to the new end diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 7713f73e66b0..69ba49b853c7 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -18,12 +18,14 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/string_helpers.h> +#include <kunit/test.h> +#include <kunit/test-bug.h> /** * string_get_size - get the size in the specified units * @size: The size to be converted in blocks * @blk_size: Size of the block (use 1 for size in bytes) - * @units: units to use (powers of 1000 or 1024) + * @units: Units to use (powers of 1000 or 1024), whether to include space separator * @buf: buffer to format to * @len: length of buffer * @@ -37,11 +39,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units, char *buf, int len) { + enum string_size_units units_base = units & STRING_UNITS_MASK; static const char *const units_10[] = { - "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" + "", "k", "M", "G", "T", "P", "E", "Z", "Y", }; static const char *const units_2[] = { - "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB" + "", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi", }; static const char *const *const units_str[] = { [STRING_UNITS_10] = units_10, @@ -66,7 +69,7 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units, /* This is Napier's algorithm. 
Reduce the original block size to * - * coefficient * divisor[units]^i + * coefficient * divisor[units_base]^i * * we do the reduction so both coefficients are just under 32 bits so * that multiplying them together won't overflow 64 bits and we keep @@ -76,12 +79,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units, * precision is in the coefficients. */ while (blk_size >> 32) { - do_div(blk_size, divisor[units]); + do_div(blk_size, divisor[units_base]); i++; } while (size >> 32) { - do_div(size, divisor[units]); + do_div(size, divisor[units_base]); i++; } @@ -90,8 +93,8 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units, size *= blk_size; /* and logarithmically reduce it until it's just under the divisor */ - while (size >= divisor[units]) { - remainder = do_div(size, divisor[units]); + while (size >= divisor[units_base]) { + remainder = do_div(size, divisor[units_base]); i++; } @@ -101,10 +104,10 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units, for (j = 0; sf_cap*10 < 1000; j++) sf_cap *= 10; - if (units == STRING_UNITS_2) { + if (units_base == STRING_UNITS_2) { /* express the remainder as a decimal. It's currently the * numerator of a fraction whose denominator is - * divisor[units], which is 1 << 10 for STRING_UNITS_2 */ + * divisor[units_base], which is 1 << 10 for STRING_UNITS_2 */ remainder *= 1000; remainder >>= 10; } @@ -126,10 +129,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units, if (i >= ARRAY_SIZE(units_2)) unit = "UNK"; else - unit = units_str[units][i]; + unit = units_str[units_base][i]; - return snprintf(buf, len, "%u%s %s", (u32)size, - tmp, unit); + return snprintf(buf, len, "%u%s%s%s%s", (u32)size, tmp, + (units & STRING_UNITS_NO_SPACE) ? "" : " ", + unit, + (units & STRING_UNITS_NO_BYTES) ? "" : "B"); } EXPORT_SYMBOL(string_get_size); @@ -826,40 +831,6 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n) EXPORT_SYMBOL_GPL(devm_kasprintf_strarray); /** - * strscpy_pad() - Copy a C-string into a sized buffer - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @count: Size of destination buffer - * - * Copy the string, or as much of it as fits, into the dest buffer. The - * behavior is undefined if the string buffers overlap. The destination - * buffer is always %NUL terminated, unless it's zero-sized. - * - * If the source string is shorter than the destination buffer, zeros - * the tail of the destination buffer. - * - * For full explanation of why you may want to consider using the - * 'strscpy' functions please see the function docstring for strscpy(). - * - * Returns: - * * The number of characters copied (not including the trailing %NUL) - * * -E2BIG if count is 0 or @src was truncated. - */ -ssize_t strscpy_pad(char *dest, const char *src, size_t count) -{ - ssize_t written; - - written = strscpy(dest, src, count); - if (written < 0 || written == count - 1) - return written; - - memset(dest + written + 1, 0, count - written - 1); - - return written; -} -EXPORT_SYMBOL(strscpy_pad); - -/** * skip_spaces - Removes leading whitespace from @str. * @str: The string to be stripped. 
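With this change the units argument to string_get_size() carries the base unit plus optional STRING_UNITS_NO_SPACE and STRING_UNITS_NO_BYTES bits, which the function strips off with STRING_UNITS_MASK before indexing the unit tables. A small sketch of the resulting output, reusing the 4096 x U64_MAX case from the KUnit test update later in this diff; buf is an assumed local buffer.

	char buf[16];

	/* Base behaviour is unchanged: "64.0 ZiB". */
	string_get_size(4096, U64_MAX, STRING_UNITS_2, buf, sizeof(buf));

	/* Drop the separating space and the trailing "B": "64.0Zi". */
	string_get_size(4096, U64_MAX,
			STRING_UNITS_2 | STRING_UNITS_NO_SPACE | STRING_UNITS_NO_BYTES,
			buf, sizeof(buf));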
* @@ -1042,10 +1013,28 @@ EXPORT_SYMBOL(__read_overflow2_field); void __write_overflow_field(size_t avail, size_t wanted) { } EXPORT_SYMBOL(__write_overflow_field); -void fortify_panic(const char *name) +static const char * const fortify_func_name[] = { +#define MAKE_FORTIFY_FUNC_NAME(func) [MAKE_FORTIFY_FUNC(func)] = #func + EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC_NAME) +#undef MAKE_FORTIFY_FUNC_NAME +}; + +void __fortify_report(const u8 reason, const size_t avail, const size_t size) +{ + const u8 func = FORTIFY_REASON_FUNC(reason); + const bool write = FORTIFY_REASON_DIR(reason); + const char *name; + + name = fortify_func_name[umin(func, FORTIFY_FUNC_UNKNOWN)]; + WARN(1, "%s: detected buffer overflow: %zu byte %s of buffer size %zu\n", + name, size, str_read_write(!write), avail); +} +EXPORT_SYMBOL(__fortify_report); + +void __fortify_panic(const u8 reason, const size_t avail, const size_t size) { - pr_emerg("detected buffer overflow in %s\n", name); + __fortify_report(reason, avail, size); BUG(); } -EXPORT_SYMBOL(fortify_panic); +EXPORT_SYMBOL(__fortify_panic); #endif /* CONFIG_FORTIFY_SOURCE */ diff --git a/lib/test-string_helpers.c b/lib/string_helpers_kunit.c index 9a68849a5d55..f88e39fd68d6 100644 --- a/lib/test-string_helpers.c +++ b/lib/string_helpers_kunit.c @@ -1,34 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Test cases for lib/string_helpers.c module. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/init.h> +#include <kunit/test.h> +#include <linux/array_size.h> #include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/module.h> #include <linux/random.h> #include <linux/string.h> #include <linux/string_helpers.h> -static __init bool test_string_check_buf(const char *name, unsigned int flags, - char *in, size_t p, - char *out_real, size_t q_real, - char *out_test, size_t q_test) +static void test_string_check_buf(struct kunit *test, + const char *name, unsigned int flags, + char *in, size_t p, + char *out_real, size_t q_real, + char *out_test, size_t q_test) { - if (q_real == q_test && !memcmp(out_test, out_real, q_test)) - return true; - - pr_warn("Test '%s' failed: flags = %#x\n", name, flags); - - print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1, - in, p, true); - print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1, - out_test, q_test, true); - print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1, - out_real, q_real, true); - - return false; + KUNIT_ASSERT_EQ_MSG(test, q_real, q_test, "name:%s", name); + KUNIT_EXPECT_MEMEQ_MSG(test, out_test, out_real, q_test, + "name:%s", name); } struct test_string { @@ -37,7 +28,7 @@ struct test_string { unsigned int flags; }; -static const struct test_string strings[] __initconst = { +static const struct test_string strings[] = { { .in = "\\f\\ \\n\\r\\t\\v", .out = "\f\\ \n\r\t\v", @@ -60,17 +51,19 @@ static const struct test_string strings[] __initconst = { }, }; -static void __init test_string_unescape(const char *name, unsigned int flags, - bool inplace) +static void test_string_unescape(struct kunit *test, + const char *name, unsigned int flags, + bool inplace) { int q_real = 256; - char *in = kmalloc(q_real, GFP_KERNEL); - char *out_test = kmalloc(q_real, GFP_KERNEL); - char *out_real = kmalloc(q_real, GFP_KERNEL); + char *in = kunit_kzalloc(test, q_real, GFP_KERNEL); + char *out_test = kunit_kzalloc(test, q_real, GFP_KERNEL); + char *out_real = kunit_kzalloc(test, q_real, GFP_KERNEL); int i, p = 0, q_test = 0; - if (!in || !out_test || !out_real) - 
goto out; + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real); for (i = 0; i < ARRAY_SIZE(strings); i++) { const char *s = strings[i].in; @@ -103,12 +96,8 @@ static void __init test_string_unescape(const char *name, unsigned int flags, q_real = string_unescape(in, out_real, q_real, flags); } - test_string_check_buf(name, flags, in, p - 1, out_real, q_real, + test_string_check_buf(test, name, flags, in, p - 1, out_real, q_real, out_test, q_test); -out: - kfree(out_real); - kfree(out_test); - kfree(in); } struct test_string_1 { @@ -123,7 +112,7 @@ struct test_string_2 { }; #define TEST_STRING_2_DICT_0 NULL -static const struct test_string_2 escape0[] __initconst = {{ +static const struct test_string_2 escape0[] = {{ .in = "\f\\ \n\r\t\v", .s1 = {{ .out = "\\f\\ \\n\\r\\t\\v", @@ -221,7 +210,7 @@ static const struct test_string_2 escape0[] __initconst = {{ }}; #define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF" -static const struct test_string_2 escape1[] __initconst = {{ +static const struct test_string_2 escape1[] = {{ .in = "\f\\ \n\r\t\v", .s1 = {{ .out = "\f\\134\\040\n\\015\\011\v", @@ -358,7 +347,7 @@ static const struct test_string_2 escape1[] __initconst = {{ /* terminator */ }}; -static const struct test_string strings_upper[] __initconst = { +static const struct test_string strings_upper[] = { { .in = "abcdefgh1234567890test", .out = "ABCDEFGH1234567890TEST", @@ -369,7 +358,7 @@ static const struct test_string strings_upper[] __initconst = { }, }; -static const struct test_string strings_lower[] __initconst = { +static const struct test_string strings_lower[] = { { .in = "ABCDEFGH1234567890TEST", .out = "abcdefgh1234567890test", @@ -380,8 +369,8 @@ static const struct test_string strings_lower[] __initconst = { }, }; -static __init const char *test_string_find_match(const struct test_string_2 *s2, - unsigned int flags) +static const char *test_string_find_match(const struct test_string_2 *s2, + unsigned int flags) { const struct test_string_1 *s1 = s2->s1; unsigned int i; @@ -402,31 +391,31 @@ static __init const char *test_string_find_match(const struct test_string_2 *s2, return NULL; } -static __init void -test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc, +static void +test_string_escape_overflow(struct kunit *test, + const char *in, int p, unsigned int flags, const char *esc, int q_test, const char *name) { int q_real; q_real = string_escape_mem(in, p, NULL, 0, flags, esc); - if (q_real != q_test) - pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n", - name, flags, q_test, q_real); + KUNIT_EXPECT_EQ_MSG(test, q_real, q_test, "name:%s: flags:%#x", name, flags); } -static __init void test_string_escape(const char *name, - const struct test_string_2 *s2, - unsigned int flags, const char *esc) +static void test_string_escape(struct kunit *test, const char *name, + const struct test_string_2 *s2, + unsigned int flags, const char *esc) { size_t out_size = 512; - char *out_test = kmalloc(out_size, GFP_KERNEL); - char *out_real = kmalloc(out_size, GFP_KERNEL); - char *in = kmalloc(256, GFP_KERNEL); + char *out_test = kunit_kzalloc(test, out_size, GFP_KERNEL); + char *out_real = kunit_kzalloc(test, out_size, GFP_KERNEL); + char *in = kunit_kzalloc(test, 256, GFP_KERNEL); int p = 0, q_test = 0; int q_real; - if (!out_test || !out_real || !in) - goto out; + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real); + 
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in); for (; s2->in; s2++) { const char *out; @@ -462,62 +451,99 @@ static __init void test_string_escape(const char *name, q_real = string_escape_mem(in, p, out_real, out_size, flags, esc); - test_string_check_buf(name, flags, in, p, out_real, q_real, out_test, + test_string_check_buf(test, name, flags, in, p, out_real, q_real, out_test, q_test); - test_string_escape_overflow(in, p, flags, esc, q_test, name); - -out: - kfree(in); - kfree(out_real); - kfree(out_test); + test_string_escape_overflow(test, in, p, flags, esc, q_test, name); } #define string_get_size_maxbuf 16 -#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \ - do { \ - BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \ - BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \ - __test_string_get_size((size), (blk_size), (exp_result10), \ - (exp_result2)); \ +#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \ + do { \ + BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \ + BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \ + __test_string_get_size(test, (size), (blk_size), (exp_result10), \ + (exp_result2)); \ } while (0) -static __init void test_string_get_size_check(const char *units, - const char *exp, - char *res, - const u64 size, - const u64 blk_size) +static void test_string_get_size_check(struct kunit *test, + const char *units, + const char *exp, + char *res, + const u64 size, + const u64 blk_size) { - if (!memcmp(res, exp, strlen(exp) + 1)) - return; - - res[string_get_size_maxbuf - 1] = '\0'; - - pr_warn("Test 'test_string_get_size' failed!\n"); - pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n", + KUNIT_EXPECT_MEMEQ_MSG(test, res, exp, strlen(exp) + 1, + "string_get_size(size = %llu, blk_size = %llu, units = %s)", size, blk_size, units); - pr_warn("expected: '%s', got '%s'\n", exp, res); } -static __init void __test_string_get_size(const u64 size, const u64 blk_size, - const char *exp_result10, - const char *exp_result2) +static void __strchrcut(char *dst, const char *src, const char *cut) +{ + const char *from = src; + size_t len; + + do { + len = strcspn(from, cut); + memcpy(dst, from, len); + dst += len; + from += len; + } while (*from++); + *dst = '\0'; +} + +static void __test_string_get_size_one(struct kunit *test, + const u64 size, const u64 blk_size, + const char *exp_result10, + const char *exp_result2, + enum string_size_units units, + const char *cut) { char buf10[string_get_size_maxbuf]; char buf2[string_get_size_maxbuf]; + char exp10[string_get_size_maxbuf]; + char exp2[string_get_size_maxbuf]; + char prefix10[64]; + char prefix2[64]; + + sprintf(prefix10, "STRING_UNITS_10 [%s]", cut); + sprintf(prefix2, "STRING_UNITS_2 [%s]", cut); + + __strchrcut(exp10, exp_result10, cut); + __strchrcut(exp2, exp_result2, cut); - string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10)); - string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2)); + string_get_size(size, blk_size, STRING_UNITS_10 | units, buf10, sizeof(buf10)); + string_get_size(size, blk_size, STRING_UNITS_2 | units, buf2, sizeof(buf2)); - test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10, - size, blk_size); + test_string_get_size_check(test, prefix10, exp10, buf10, size, blk_size); + test_string_get_size_check(test, prefix2, exp2, buf2, size, blk_size); +} + +static void __test_string_get_size(struct kunit *test, + const u64 size, const u64 blk_size, + const 
char *exp_result10, + const char *exp_result2) +{ + struct { + enum string_size_units units; + const char *cut; + } get_size_test_cases[] = { + { 0, "" }, + { STRING_UNITS_NO_SPACE, " " }, + { STRING_UNITS_NO_SPACE | STRING_UNITS_NO_BYTES, " B" }, + { STRING_UNITS_NO_BYTES, "B" }, + }; + int i; - test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2, - size, blk_size); + for (i = 0; i < ARRAY_SIZE(get_size_test_cases); i++) + __test_string_get_size_one(test, size, blk_size, + exp_result10, exp_result2, + get_size_test_cases[i].units, + get_size_test_cases[i].cut); } -static __init void test_string_get_size(void) +static void test_get_size(struct kunit *test) { /* small values */ test_string_get_size_one(0, 512, "0 B", "0 B"); @@ -537,7 +563,7 @@ static __init void test_string_get_size(void) test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB"); } -static void __init test_string_upper_lower(void) +static void test_upper_lower(struct kunit *test) { char *dst; int i; @@ -547,16 +573,10 @@ static void __init test_string_upper_lower(void) int len = strlen(strings_upper[i].in) + 1; dst = kmalloc(len, GFP_KERNEL); - if (!dst) - return; + KUNIT_ASSERT_NOT_NULL(test, dst); string_upper(dst, s); - if (memcmp(dst, strings_upper[i].out, len)) { - pr_warn("Test 'string_upper' failed : expected %s, got %s!\n", - strings_upper[i].out, dst); - kfree(dst); - return; - } + KUNIT_EXPECT_STREQ(test, dst, strings_upper[i].out); kfree(dst); } @@ -565,45 +585,44 @@ static void __init test_string_upper_lower(void) int len = strlen(strings_lower[i].in) + 1; dst = kmalloc(len, GFP_KERNEL); - if (!dst) - return; + KUNIT_ASSERT_NOT_NULL(test, dst); string_lower(dst, s); - if (memcmp(dst, strings_lower[i].out, len)) { - pr_warn("Test 'string_lower failed : : expected %s, got %s!\n", - strings_lower[i].out, dst); - kfree(dst); - return; - } + KUNIT_EXPECT_STREQ(test, dst, strings_lower[i].out); kfree(dst); } } -static int __init test_string_helpers_init(void) +static void test_unescape(struct kunit *test) { unsigned int i; - pr_info("Running tests...\n"); for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++) - test_string_unescape("unescape", i, false); - test_string_unescape("unescape inplace", + test_string_unescape(test, "unescape", i, false); + test_string_unescape(test, "unescape inplace", get_random_u32_below(UNESCAPE_ALL_MASK + 1), true); /* Without dictionary */ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++) - test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0); + test_string_escape(test, "escape 0", escape0, i, TEST_STRING_2_DICT_0); /* With dictionary */ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++) - test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1); + test_string_escape(test, "escape 1", escape1, i, TEST_STRING_2_DICT_1); +} - /* Test string_get_size() */ - test_string_get_size(); +static struct kunit_case string_helpers_test_cases[] = { + KUNIT_CASE(test_get_size), + KUNIT_CASE(test_upper_lower), + KUNIT_CASE(test_unescape), + {} +}; - /* Test string upper(), string_lower() */ - test_string_upper_lower(); +static struct kunit_suite string_helpers_test_suite = { + .name = "string_helpers", + .test_cases = string_helpers_test_cases, +}; + +kunit_test_suites(&string_helpers_test_suite); - return -EINVAL; -} -module_init(test_string_helpers_init); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/string_kunit.c b/lib/string_kunit.c new file mode 100644 index 000000000000..2a812decf14b --- /dev/null +++ b/lib/string_kunit.c @@ -0,0 +1,636 @@ +// SPDX-License-Identifier: GPL-2.0-only 
+/* + * Test cases for string functions. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <kunit/test.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/string.h> + +#define STRCMP_LARGE_BUF_LEN 2048 +#define STRCMP_CHANGE_POINT 1337 +#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0) +#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0) +#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0) + +static void string_test_memset16(struct kunit *test) +{ + unsigned i, j, k; + u16 v, *p; + + p = kunit_kzalloc(test, 256 * 2 * 2, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p); + + for (i = 0; i < 256; i++) { + for (j = 0; j < 256; j++) { + memset(p, 0xa1, 256 * 2 * sizeof(v)); + memset16(p + i, 0xb1b2, j); + for (k = 0; k < 512; k++) { + v = p[k]; + if (k < i) { + KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1, + "i:%d j:%d k:%d", i, j, k); + } else if (k < i + j) { + KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2, + "i:%d j:%d k:%d", i, j, k); + } else { + KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1, + "i:%d j:%d k:%d", i, j, k); + } + } + } + } +} + +static void string_test_memset32(struct kunit *test) +{ + unsigned i, j, k; + u32 v, *p; + + p = kunit_kzalloc(test, 256 * 2 * 4, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p); + + for (i = 0; i < 256; i++) { + for (j = 0; j < 256; j++) { + memset(p, 0xa1, 256 * 2 * sizeof(v)); + memset32(p + i, 0xb1b2b3b4, j); + for (k = 0; k < 512; k++) { + v = p[k]; + if (k < i) { + KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1, + "i:%d j:%d k:%d", i, j, k); + } else if (k < i + j) { + KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4, + "i:%d j:%d k:%d", i, j, k); + } else { + KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1, + "i:%d j:%d k:%d", i, j, k); + } + } + } + } +} + +static void string_test_memset64(struct kunit *test) +{ + unsigned i, j, k; + u64 v, *p; + + p = kunit_kzalloc(test, 256 * 2 * 8, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p); + + for (i = 0; i < 256; i++) { + for (j = 0; j < 256; j++) { + memset(p, 0xa1, 256 * 2 * sizeof(v)); + memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j); + for (k = 0; k < 512; k++) { + v = p[k]; + if (k < i) { + KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL, + "i:%d j:%d k:%d", i, j, k); + } else if (k < i + j) { + KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4b5b6b7b8ULL, + "i:%d j:%d k:%d", i, j, k); + } else { + KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL, + "i:%d j:%d k:%d", i, j, k); + } + } + } + } +} + +static void string_test_strchr(struct kunit *test) +{ + const char *test_string = "abcdefghijkl"; + const char *empty_string = ""; + char *result; + int i; + + for (i = 0; i < strlen(test_string) + 1; i++) { + result = strchr(test_string, test_string[i]); + KUNIT_ASSERT_EQ_MSG(test, result - test_string, i, + "char:%c", 'a' + i); + } + + result = strchr(empty_string, '\0'); + KUNIT_ASSERT_PTR_EQ(test, result, empty_string); + + result = strchr(empty_string, 'a'); + KUNIT_ASSERT_NULL(test, result); + + result = strchr(test_string, 'z'); + KUNIT_ASSERT_NULL(test, result); +} + +static void string_test_strnchr(struct kunit *test) +{ + const char *test_string = "abcdefghijkl"; + const char *empty_string = ""; + char *result; + int i, j; + + for (i = 0; i < strlen(test_string) + 1; i++) { + for (j = 0; j < strlen(test_string) + 2; j++) { + result = strnchr(test_string, j, test_string[i]); + if (j <= i) { + KUNIT_ASSERT_NULL_MSG(test, result, + "char:%c i:%d j:%d", 'a' + i, i, 
j); + } else { + KUNIT_ASSERT_EQ_MSG(test, result - test_string, i, + "char:%c i:%d j:%d", 'a' + i, i, j); + } + } + } + + result = strnchr(empty_string, 0, '\0'); + KUNIT_ASSERT_NULL(test, result); + + result = strnchr(empty_string, 1, '\0'); + KUNIT_ASSERT_PTR_EQ(test, result, empty_string); + + result = strnchr(empty_string, 1, 'a'); + KUNIT_ASSERT_NULL(test, result); + + result = strnchr(NULL, 0, '\0'); + KUNIT_ASSERT_NULL(test, result); +} + +static void string_test_strspn(struct kunit *test) +{ + static const struct strspn_test { + const char str[16]; + const char accept[16]; + const char reject[16]; + unsigned a; + unsigned r; + } tests[] = { + { "foobar", "", "", 0, 6 }, + { "abba", "abc", "ABBA", 4, 4 }, + { "abba", "a", "b", 1, 1 }, + { "", "abc", "abc", 0, 0}, + }; + const struct strspn_test *s = tests; + size_t i; + + for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) { + KUNIT_ASSERT_EQ_MSG(test, s->a, strspn(s->str, s->accept), + "i:%zu", i); + KUNIT_ASSERT_EQ_MSG(test, s->r, strcspn(s->str, s->reject), + "i:%zu", i); + } +} + +static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN]; +static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN]; + +static void strcmp_fill_buffers(char fill1, char fill2) +{ + memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN); + memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN); + strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0; + strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0; +} + +static void string_test_strcmp(struct kunit *test) +{ + /* Equal strings */ + STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!"); + /* First string is lexicographically less than the second */ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!"); + /* First string is lexicographically larger than the second */ + STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!"); + /* Empty string is always lexicographically less than any non-empty string */ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string"); + /* Two empty strings should be equal */ + STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", ""); + /* Compare two strings which have only one char difference */ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba"); + /* Compare two strings which have the same prefix*/ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else"); +} + +static void string_test_strcmp_long_strings(struct kunit *test) +{ + strcmp_fill_buffers('B', 'B'); + STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A'; + STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C'; + STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2); +} + +static void string_test_strncmp(struct kunit *test) +{ + /* Equal strings */ + STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13); + /* First string is lexicographically less than the second */ + STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13); + /* Result is always 'equal' when count = 0 */ + STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0); + /* Strings with common prefix are equal if count = length of prefix */ + STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3); + /* Strings with common prefix are not equal when count = length of prefix + 1 */ + STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4); + /* If one string 
is a prefix of another, the shorter string is lexicographically smaller */ + STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else", + strlen("Just a string and something else")); + /* + * If one string is a prefix of another, and we check first length + * of prefix chars, the result is 'equal' + */ + STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else", + strlen("Just a string")); +} + +static void string_test_strncmp_long_strings(struct kunit *test) +{ + strcmp_fill_buffers('B', 'B'); + STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_LARGE_BUF_LEN); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A'; + STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_LARGE_BUF_LEN); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C'; + STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_LARGE_BUF_LEN); + /* the strings are equal up to STRCMP_CHANGE_POINT */ + STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_CHANGE_POINT); + STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_CHANGE_POINT + 1); +} + +static void string_test_strcasecmp(struct kunit *test) +{ + /* Same strings in different case should be equal */ + STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!"); + /* Empty strings should be equal */ + STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", ""); + /* Despite ascii code for 'a' is larger than ascii code for 'B', 'a' < 'B' */ + STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B"); + STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a"); + /* Special symbols and numbers should be processed correctly */ + STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^"); +} + +static void string_test_strcasecmp_long_strings(struct kunit *test) +{ + strcmp_fill_buffers('b', 'B'); + STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a'; + STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C'; + STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2); +} + +static void string_test_strncasecmp(struct kunit *test) +{ + /* Same strings in different case should be equal */ + STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba")); + /* strncasecmp should check 'count' chars only */ + STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5); + STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1); + STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1); + /* Result is always 'equal' when count = 0 */ + STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0); +} + +static void string_test_strncasecmp_long_strings(struct kunit *test) +{ + strcmp_fill_buffers('b', 'B'); + STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_LARGE_BUF_LEN); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a'; + STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_LARGE_BUF_LEN); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C'; + STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_LARGE_BUF_LEN); + + STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1, + strcmp_buffer2, STRCMP_CHANGE_POINT); + STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, 
strcmp_buffer1, + strcmp_buffer2, STRCMP_CHANGE_POINT + 1); +} + +/** + * strscpy_check() - Run a specific test case. + * @test: KUnit test context pointer + * @src: Source string, argument to strscpy_pad() + * @count: Size of destination buffer, argument to strscpy_pad() + * @expected: Expected return value from call to strscpy_pad() + * @chars: Number of characters from the src string expected to be + * written to the dst buffer. + * @terminator: 1 if there should be a terminating null byte 0 otherwise. + * @pad: Number of pad characters expected (in the tail of dst buffer). + * (@pad does not include the null terminator byte.) + * + * Calls strscpy_pad() and verifies the return value and state of the + * destination buffer after the call returns. + */ +static void strscpy_check(struct kunit *test, char *src, int count, + int expected, int chars, int terminator, int pad) +{ + int nr_bytes_poison; + int max_expected; + int max_count; + int written; + char buf[6]; + int index, i; + const char POISON = 'z'; + + KUNIT_ASSERT_TRUE_MSG(test, src != NULL, + "null source string not supported"); + + memset(buf, POISON, sizeof(buf)); + /* Future proofing test suite, validate args */ + max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */ + max_expected = count - 1; /* Space for the null */ + + KUNIT_ASSERT_LE_MSG(test, count, max_count, + "count (%d) is too big (%d) ... aborting", count, max_count); + KUNIT_EXPECT_LE_MSG(test, expected, max_expected, + "expected (%d) is bigger than can possibly be returned (%d)", + expected, max_expected); + + written = strscpy_pad(buf, src, count); + KUNIT_ASSERT_EQ(test, written, expected); + + if (count && written == -E2BIG) { + KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1), + "buffer state invalid for -E2BIG"); + KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', + "too big string is not null terminated correctly"); + } + + for (i = 0; i < chars; i++) + KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i], + "buf[i]==%c != src[i]==%c", buf[i], src[i]); + + if (terminator) + KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', + "string is not null terminated correctly"); + + for (i = 0; i < pad; i++) { + index = chars + terminator + i; + KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0', + "padding missing at index: %d", i); + } + + nr_bytes_poison = sizeof(buf) - chars - terminator - pad; + for (i = 0; i < nr_bytes_poison; i++) { + index = sizeof(buf) - 1 - i; /* Check from the end back */ + KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON, + "poison value missing at index: %d", i); + } +} + +static void string_test_strscpy(struct kunit *test) +{ + char dest[8]; + + /* + * strscpy_check() uses a destination buffer of size 6 and needs at + * least 2 characters spare (one for null and one to check for + * overflow). This means we should only call tc() with + * strings up to a maximum of 4 characters long and 'count' + * should not exceed 4. To test with longer strings increase + * the buffer size in tc(). 
+ */ + + /* strscpy_check(test, src, count, expected, chars, terminator, pad) */ + strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0); + strscpy_check(test, "", 0, -E2BIG, 0, 0, 0); + + strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0); + strscpy_check(test, "", 1, 0, 0, 1, 0); + + strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0); + strscpy_check(test, "a", 2, 1, 1, 1, 0); + strscpy_check(test, "", 2, 0, 0, 1, 1); + + strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0); + strscpy_check(test, "ab", 3, 2, 2, 1, 0); + strscpy_check(test, "a", 3, 1, 1, 1, 1); + strscpy_check(test, "", 3, 0, 0, 1, 2); + + strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0); + strscpy_check(test, "abc", 4, 3, 3, 1, 0); + strscpy_check(test, "ab", 4, 2, 2, 1, 1); + strscpy_check(test, "a", 4, 1, 1, 1, 2); + strscpy_check(test, "", 4, 0, 0, 1, 3); + + /* Compile-time-known source strings. */ + KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG); +} + +static volatile int unconst; + +static void string_test_strcat(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy does nothing. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* 4 characters copied in, stops at %NUL. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + /* 2 more characters copied in okay. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void string_test_strncat(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy of size 0 does nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Empty copy of size 1 does nothing too. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Copy of max 0 characters should do nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in, even if max is 8. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + KUNIT_EXPECT_EQ(test, dest[6], '\0'); + /* 2 characters copied in okay, 2 ignored. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void string_test_strlcat(struct kunit *test) +{ + char dest[8] = ""; + int len = sizeof(dest) + unconst; + + /* Destination is terminated. */ + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy is size 0. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Size 1 should keep buffer terminated, report size of source only. 
*/ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); + KUNIT_EXPECT_STREQ(test, dest, "four"); + /* 2 characters copied in okay, gets to 6 total. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 2 characters ignored if max size (7) reached. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 1 of 2 characters skipped, now at true max size. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); + /* Everything else ignored, now at full size. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); +} + +static void string_test_strtomem(struct kunit *test) +{ + static const char input[sizeof(unsigned long)] = "hi"; + static const char truncate[] = "this is too long"; + struct { + unsigned long canary1; + unsigned char output[sizeof(unsigned long)] __nonstring; + unsigned long canary2; + } wrap; + + memset(&wrap, 0xFF, sizeof(wrap)); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, + "bad initial canary value"); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, + "bad initial canary value"); + + /* Check unpadded copy leaves surroundings untouched. */ + strtomem(wrap.output, input); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated copy leaves surroundings untouched. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem(wrap.output, truncate); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + for (size_t i = 0; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check padded copy leaves only string padded. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem_pad(wrap.output, input, 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated padded copy has no padding. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem(wrap.output, truncate); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + for (size_t i = 0; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); +} + + +static void string_test_memtostr(struct kunit *test) +{ + char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' }; + char nonstring_small[3] = { 'a', 'b', 'c' }; + char dest[sizeof(nonstring) + 1]; + + /* Copy in a non-NUL-terminated string into exactly right-sized dest. 
*/ + KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], 'X'); + + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], '\0'); +} + +static struct kunit_case string_test_cases[] = { + KUNIT_CASE(string_test_memset16), + KUNIT_CASE(string_test_memset32), + KUNIT_CASE(string_test_memset64), + KUNIT_CASE(string_test_strchr), + KUNIT_CASE(string_test_strnchr), + KUNIT_CASE(string_test_strspn), + KUNIT_CASE(string_test_strcmp), + KUNIT_CASE(string_test_strcmp_long_strings), + KUNIT_CASE(string_test_strncmp), + KUNIT_CASE(string_test_strncmp_long_strings), + KUNIT_CASE(string_test_strcasecmp), + KUNIT_CASE(string_test_strcasecmp_long_strings), + KUNIT_CASE(string_test_strncasecmp), + KUNIT_CASE(string_test_strncasecmp_long_strings), + KUNIT_CASE(string_test_strscpy), + KUNIT_CASE(string_test_strcat), + KUNIT_CASE(string_test_strncat), + KUNIT_CASE(string_test_strlcat), + KUNIT_CASE(string_test_strtomem), + KUNIT_CASE(string_test_memtostr), + {} +}; + +static struct kunit_suite string_test_suite = { + .name = "string", + .test_cases = string_test_cases, +}; + +kunit_test_suites(&string_test_suite); + +MODULE_LICENSE("GPL v2"); diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c deleted file mode 100644 index a6b6344354ed..000000000000 --- a/lib/strscpy_kunit.c +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Kernel module for testing 'strscpy' family of functions. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <kunit/test.h> -#include <linux/string.h> - -/* - * tc() - Run a specific test case. - * @src: Source string, argument to strscpy_pad() - * @count: Size of destination buffer, argument to strscpy_pad() - * @expected: Expected return value from call to strscpy_pad() - * @terminator: 1 if there should be a terminating null byte 0 otherwise. - * @chars: Number of characters from the src string expected to be - * written to the dst buffer. - * @pad: Number of pad characters expected (in the tail of dst buffer). - * (@pad does not include the null terminator byte.) - * - * Calls strscpy_pad() and verifies the return value and state of the - * destination buffer after the call returns. - */ -static void tc(struct kunit *test, char *src, int count, int expected, - int chars, int terminator, int pad) -{ - int nr_bytes_poison; - int max_expected; - int max_count; - int written; - char buf[6]; - int index, i; - const char POISON = 'z'; - - KUNIT_ASSERT_TRUE_MSG(test, src != NULL, - "null source string not supported"); - - memset(buf, POISON, sizeof(buf)); - /* Future proofing test suite, validate args */ - max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */ - max_expected = count - 1; /* Space for the null */ - - KUNIT_ASSERT_LE_MSG(test, count, max_count, - "count (%d) is too big (%d) ... 
aborting", count, max_count); - KUNIT_EXPECT_LE_MSG(test, expected, max_expected, - "expected (%d) is bigger than can possibly be returned (%d)", - expected, max_expected); - - written = strscpy_pad(buf, src, count); - KUNIT_ASSERT_EQ(test, written, expected); - - if (count && written == -E2BIG) { - KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1), - "buffer state invalid for -E2BIG"); - KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', - "too big string is not null terminated correctly"); - } - - for (i = 0; i < chars; i++) - KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i], - "buf[i]==%c != src[i]==%c", buf[i], src[i]); - - if (terminator) - KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', - "string is not null terminated correctly"); - - for (i = 0; i < pad; i++) { - index = chars + terminator + i; - KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0', - "padding missing at index: %d", i); - } - - nr_bytes_poison = sizeof(buf) - chars - terminator - pad; - for (i = 0; i < nr_bytes_poison; i++) { - index = sizeof(buf) - 1 - i; /* Check from the end back */ - KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON, - "poison value missing at index: %d", i); - } -} - -static void strscpy_test(struct kunit *test) -{ - char dest[8]; - - /* - * tc() uses a destination buffer of size 6 and needs at - * least 2 characters spare (one for null and one to check for - * overflow). This means we should only call tc() with - * strings up to a maximum of 4 characters long and 'count' - * should not exceed 4. To test with longer strings increase - * the buffer size in tc(). - */ - - /* tc(test, src, count, expected, chars, terminator, pad) */ - tc(test, "a", 0, -E2BIG, 0, 0, 0); - tc(test, "", 0, -E2BIG, 0, 0, 0); - - tc(test, "a", 1, -E2BIG, 0, 1, 0); - tc(test, "", 1, 0, 0, 1, 0); - - tc(test, "ab", 2, -E2BIG, 1, 1, 0); - tc(test, "a", 2, 1, 1, 1, 0); - tc(test, "", 2, 0, 0, 1, 1); - - tc(test, "abc", 3, -E2BIG, 2, 1, 0); - tc(test, "ab", 3, 2, 2, 1, 0); - tc(test, "a", 3, 1, 1, 1, 1); - tc(test, "", 3, 0, 0, 1, 2); - - tc(test, "abcd", 4, -E2BIG, 3, 1, 0); - tc(test, "abc", 4, 3, 3, 1, 0); - tc(test, "ab", 4, 2, 2, 1, 1); - tc(test, "a", 4, 1, 1, 1, 2); - tc(test, "", 4, 0, 0, 1, 3); - - /* Compile-time-known source strings. */ - KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG); -} - -static struct kunit_case strscpy_test_cases[] = { - KUNIT_CASE(strscpy_test), - {} -}; - -static struct kunit_suite strscpy_test_suite = { - .name = "strscpy", - .test_cases = strscpy_test_cases, -}; - -kunit_test_suite(strscpy_test_suite); - -MODULE_AUTHOR("Tobin C. 
Harding <tobin@kernel.org>"); -MODULE_LICENSE("GPL"); diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 65f22c2578b0..83019beabce4 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -60,18 +60,17 @@ static const unsigned long exp3_1_0[] __initconst = { }; static bool __init -__check_eq_uint(const char *srcfile, unsigned int line, - const unsigned int exp_uint, unsigned int x) +__check_eq_ulong(const char *srcfile, unsigned int line, + const unsigned long exp_ulong, unsigned long x) { - if (exp_uint != x) { - pr_err("[%s:%u] expected %u, got %u\n", - srcfile, line, exp_uint, x); + if (exp_ulong != x) { + pr_err("[%s:%u] expected %lu, got %lu\n", + srcfile, line, exp_ulong, x); return false; } return true; } - static bool __init __check_eq_bitmap(const char *srcfile, unsigned int line, const unsigned long *exp_bmap, const unsigned long *bmap, @@ -185,7 +184,8 @@ __check_eq_str(const char *srcfile, unsigned int line, result; \ }) -#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__) +#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__) +#define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y)) #define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__) #define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) #define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__) @@ -380,6 +380,47 @@ static void __init test_replace(void) expect_eq_bitmap(bmap, exp3_1_0, nbits); } +static const unsigned long sg_mask[] __initconst = { + BITMAP_FROM_U64(0x000000000000035aULL), +}; + +static const unsigned long sg_src[] __initconst = { + BITMAP_FROM_U64(0x0000000000000667ULL), +}; + +static const unsigned long sg_gather_exp[] __initconst = { + BITMAP_FROM_U64(0x0000000000000029ULL), +}; + +static const unsigned long sg_scatter_exp[] __initconst = { + BITMAP_FROM_U64(0x000000000000021aULL), +}; + +static void __init test_bitmap_sg(void) +{ + unsigned int nbits = 64; + DECLARE_BITMAP(bmap_gather, 100); + DECLARE_BITMAP(bmap_scatter, 100); + DECLARE_BITMAP(bmap_tmp, 100); + DECLARE_BITMAP(bmap_res, 100); + + /* Simple gather call */ + bitmap_zero(bmap_gather, 100); + bitmap_gather(bmap_gather, sg_src, sg_mask, nbits); + expect_eq_bitmap(sg_gather_exp, bmap_gather, nbits); + + /* Simple scatter call */ + bitmap_zero(bmap_scatter, 100); + bitmap_scatter(bmap_scatter, sg_src, sg_mask, nbits); + expect_eq_bitmap(sg_scatter_exp, bmap_scatter, nbits); + + /* Scatter/gather relationship */ + bitmap_zero(bmap_tmp, 100); + bitmap_gather(bmap_tmp, bmap_scatter, sg_mask, nbits); + bitmap_scatter(bmap_res, bmap_tmp, sg_mask, nbits); + expect_eq_bitmap(bmap_scatter, bmap_res, nbits); +} + #define PARSE_TIME 0x1 #define NO_LEN 0x2 @@ -507,7 +548,7 @@ static void __init test_bitmap_parselist(void) } if (ptest.flags & PARSE_TIME) - pr_err("parselist: %d: input is '%s' OK, Time: %llu\n", + pr_info("parselist: %d: input is '%s' OK, Time: %llu\n", i, ptest.in, time); #undef ptest @@ -546,7 +587,7 @@ static void __init test_bitmap_printlist(void) goto out; } - pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); + pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); out: kfree(buf); kfree(bmap); @@ -624,7 +665,7 @@ static void __init test_bitmap_parse(void) } if (test.flags & PARSE_TIME) - pr_err("parse: %d: input is '%s' OK, Time: %llu\n", + pr_info("parse: %d: input is '%s' OK, Time: %llu\n", i, test.in, time); } } @@ -1204,14 +1245,7 @@ static void __init test_bitmap_const_eval(void) * in runtime. 
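test_bitmap_sg() above pins down the semantics of the two helpers it exercises: bitmap_gather() packs the bits of src selected by mask into the low bits of dst, and bitmap_scatter() spreads the low bits of src out to the positions set in mask. A short sketch using the same sg_src/sg_mask values as the test; the local names are illustrative.

	static const unsigned long mask[] = { BITMAP_FROM_U64(0x35a) };
	static const unsigned long src[]  = { BITMAP_FROM_U64(0x667) };
	DECLARE_BITMAP(gathered, 64);
	DECLARE_BITMAP(scattered, 64);

	bitmap_zero(gathered, 64);
	bitmap_zero(scattered, 64);

	/* Bits of src at mask positions 1,3,4,6,8,9 packed low: 0x29. */
	bitmap_gather(gathered, src, mask, 64);

	/* Low six bits of src spread to positions 1,3,4,6,8,9: 0x21a. */
	bitmap_scatter(scattered, src, mask, 64);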
*/ - /* - * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`. - * Clang on s390 optimizes bitops at compile-time as intended, but at - * the same time stops treating @bitmap and @bitopvar as compile-time - * constants after regular test_bit() is executed, thus triggering the - * build bugs below. So, call const_test_bit() there directly until - * the compiler is fixed. - */ + /* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */ bitmap_clear(bitmap, 0, BITS_PER_LONG); if (!test_bit(7, bitmap)) bitmap_set(bitmap, 5, 2); @@ -1243,8 +1277,179 @@ static void __init test_bitmap_const_eval(void) /* ~BIT(25) */ BUILD_BUG_ON(!__builtin_constant_p(~var)); BUILD_BUG_ON(~var != ~BIT(25)); + + /* ~BIT(25) | BIT(25) == ~0UL */ + bitmap_complement(&var, &var, BITS_PER_LONG); + __assign_bit(25, &var, true); + + /* !(~(~0UL)) == 1 */ + res = bitmap_full(&var, BITS_PER_LONG); + BUILD_BUG_ON(!__builtin_constant_p(res)); + BUILD_BUG_ON(!res); +} + +/* + * Test bitmap should be big enough to include the cases when start is not in + * the first word, and start+nbits lands in the following word. + */ +#define TEST_BIT_LEN (1000) + +/* + * Helper function to test bitmap_write() overwriting the chosen byte pattern. + */ +static void __init test_bitmap_write_helper(const char *pattern) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN); + DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN); + unsigned long w, r, bit; + int i, n, nbits; + + /* + * Only parse the pattern once and store the result in the intermediate + * bitmap. + */ + bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN); + + /* + * Check that writing a single bit does not accidentally touch the + * adjacent bits. + */ + for (i = 0; i < TEST_BIT_LEN; i++) { + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); + for (bit = 0; bit <= 1; bit++) { + bitmap_write(bitmap, bit, i, 1); + __assign_bit(i, exp_bitmap, bit); + expect_eq_bitmap(exp_bitmap, bitmap, + TEST_BIT_LEN); + } + } + + /* Ensure writing 0 bits does not change anything. */ + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); + for (i = 0; i < TEST_BIT_LEN; i++) { + bitmap_write(bitmap, ~0UL, i, 0); + expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN); + } + + for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) { + w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL + : 0xdeadbeefUL; + w >>= (BITS_PER_LONG - nbits); + for (i = 0; i <= TEST_BIT_LEN - nbits; i++) { + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); + for (n = 0; n < nbits; n++) + __assign_bit(i + n, exp_bitmap, w & BIT(n)); + bitmap_write(bitmap, w, i, nbits); + expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN); + r = bitmap_read(bitmap, i, nbits); + expect_eq_ulong(r, w); + } + } +} + +static void __init test_bitmap_read_write(void) +{ + unsigned char *pattern[3] = {"", "all:1/2", "all"}; + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG; + unsigned long val; + int i, pi; + + /* + * Reading/writing zero bits should not crash the kernel. + * READ_ONCE() prevents constant folding. + */ + bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits)); + /* Return value of bitmap_read() is undefined here. */ + bitmap_read(NULL, 0, READ_ONCE(zero_bits)); + + /* + * Reading/writing more than BITS_PER_LONG bits should not crash the + * kernel. READ_ONCE() prevents constant folding. 
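The merge check in test_bitmap_read_write() relies on a simple contract: bitmap_write(map, val, start, nbits) stores the low nbits of val at bit offset start, and bitmap_read() returns them unchanged. A one-word userspace model of that contract, illustrative only (the kernel helpers handle multi-word bitmaps), reproduces the 0b10110101 value used in the test:

#include <assert.h>

#define WORD_BITS (8 * sizeof(unsigned long))

static unsigned long word_write(unsigned long map, unsigned long val,
				unsigned int start, unsigned int nbits)
{
	unsigned long mask;

	if (!nbits)
		return map;			/* writing 0 bits is a no-op */
	mask = (~0UL >> (WORD_BITS - nbits)) << start;
	return (map & ~mask) | ((val << start) & mask);
}

static unsigned long word_read(unsigned long map, unsigned int start,
			       unsigned int nbits)
{
	if (!nbits)
		return 0;
	return (map >> start) & (~0UL >> (WORD_BITS - nbits));
}

int main(void)
{
	unsigned long map = 0;

	/* Same values as the merge check in test_bitmap_read_write(). */
	map = word_write(map, 0b10101UL, 0, 5);
	map = word_write(map, 0b101UL, 5, 3);
	assert(word_read(map, 0, 8) == 0b10110101UL);
	return 0;
}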
+ */ + bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1); + /* Return value of bitmap_read() is undefined here. */ + bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1); + + /* + * Ensure that bitmap_read() reads the same value that was previously + * written, and two consequent values are correctly merged. + * The resulting bit pattern is asymmetric to rule out possible issues + * with bit numeration order. + */ + for (i = 0; i < TEST_BIT_LEN - 7; i++) { + bitmap_zero(bitmap, TEST_BIT_LEN); + + bitmap_write(bitmap, 0b10101UL, i, 5); + val = bitmap_read(bitmap, i, 5); + expect_eq_ulong(0b10101UL, val); + + bitmap_write(bitmap, 0b101UL, i + 5, 3); + val = bitmap_read(bitmap, i + 5, 3); + expect_eq_ulong(0b101UL, val); + + val = bitmap_read(bitmap, i, 8); + expect_eq_ulong(0b10110101UL, val); + } + + for (pi = 0; pi < ARRAY_SIZE(pattern); pi++) + test_bitmap_write_helper(pattern[pi]); } +static void __init test_bitmap_read_perf(void) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned int cnt, nbits, i; + unsigned long val; + ktime_t time; + + bitmap_fill(bitmap, TEST_BIT_LEN); + time = ktime_get(); + for (cnt = 0; cnt < 5; cnt++) { + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { + for (i = 0; i < TEST_BIT_LEN; i++) { + if (i + nbits > TEST_BIT_LEN) + break; + /* + * Prevent the compiler from optimizing away the + * bitmap_read() by using its value. + */ + WRITE_ONCE(val, bitmap_read(bitmap, i, nbits)); + } + } + } + time = ktime_get() - time; + pr_info("Time spent in %s:\t%llu\n", __func__, time); +} + +static void __init test_bitmap_write_perf(void) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned int cnt, nbits, i; + unsigned long val = 0xfeedface; + ktime_t time; + + bitmap_zero(bitmap, TEST_BIT_LEN); + time = ktime_get(); + for (cnt = 0; cnt < 5; cnt++) { + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { + for (i = 0; i < TEST_BIT_LEN; i++) { + if (i + nbits > TEST_BIT_LEN) + break; + bitmap_write(bitmap, val, i, nbits); + } + } + } + time = ktime_get() - time; + pr_info("Time spent in %s:\t%llu\n", __func__, time); +} + +#undef TEST_BIT_LEN + static void __init selftest(void) { test_zero_clear(); @@ -1252,6 +1457,7 @@ static void __init selftest(void) test_copy(); test_bitmap_region(); test_replace(); + test_bitmap_sg(); test_bitmap_arr32(); test_bitmap_arr64(); test_bitmap_parse(); @@ -1261,6 +1467,9 @@ static void __init selftest(void) test_bitmap_cut(); test_bitmap_print_buf(); test_bitmap_const_eval(); + test_bitmap_read_write(); + test_bitmap_read_perf(); + test_bitmap_write_perf(); test_find_nth_bit(); test_for_each_set_bit(); diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c index 4c40580a99a3..f247089d63c0 100644 --- a/lib/test_blackhole_dev.c +++ b/lib/test_blackhole_dev.c @@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void) { struct ipv6hdr *ip6h; struct sk_buff *skb; - struct ethhdr *ethh; struct udphdr *uh; int data_len; int ret; @@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void) ip6h->saddr = in6addr_loopback; ip6h->daddr = in6addr_loopback; /* Ether */ - ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr)); + skb_push(skb, sizeof(struct ethhdr)); skb_set_mac_header(skb, 0); skb->protocol = htons(ETH_P_IPV6); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 7916503e6a6a..207ff87194db 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -5144,22 +5144,6 @@ static struct bpf_test tests[] = { { }, { { 0, 0x1 } }, }, - { - "ALU_MOVSX | BPF_W", - .u.insns_int = { - BPF_LD_IMM64(R2, 0x00000000deadbeefLL), - 
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL), - BPF_MOVSX32_REG(R1, R3, 32), - BPF_JMP_REG(BPF_JEQ, R2, R1, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, - { }, - { { 0, 0x1 } }, - }, /* MOVSX64 REG */ { "ALU64_MOVSX | BPF_B", @@ -6293,7 +6277,7 @@ static struct bpf_test tests[] = { }, /* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */ { - "ALU64_SMOD_X: -7 % 2 = -1", + "ALU64_SMOD_K: -7 % 2 = -1", .u.insns_int = { BPF_LD_IMM64(R0, -7), BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1), @@ -12215,7 +12199,6 @@ static struct bpf_test tests[] = { BPF_JMP32_IMM_ZEXT(JLE), BPF_JMP32_IMM_ZEXT(JSGT), BPF_JMP32_IMM_ZEXT(JSGE), - BPF_JMP32_IMM_ZEXT(JSGT), BPF_JMP32_IMM_ZEXT(JSLT), BPF_JMP32_IMM_ZEXT(JSLE), #undef BPF_JMP2_IMM_ZEXT @@ -12251,7 +12234,6 @@ static struct bpf_test tests[] = { BPF_JMP32_REG_ZEXT(JLE), BPF_JMP32_REG_ZEXT(JSGT), BPF_JMP32_REG_ZEXT(JSGE), - BPF_JMP32_REG_ZEXT(JSGT), BPF_JMP32_REG_ZEXT(JSLT), BPF_JMP32_REG_ZEXT(JSLE), #undef BPF_JMP2_REG_ZEXT @@ -13449,7 +13431,7 @@ static struct bpf_test tests[] = { .stack_depth = 8, .nr_testruns = NR_PATTERN_RUNS, }, - /* 64-bit atomic magnitudes */ + /* 32-bit atomic magnitudes */ { "ATOMIC_W_ADD: all operand magnitudes", { }, diff --git a/lib/test_firmware.c b/lib/test_firmware.c index add4699fc6cd..9cfdcd6d21db 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -1132,6 +1132,7 @@ static const char * const fw_upload_err_str[] = { [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size", [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error", [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout", + [FW_UPLOAD_ERR_FW_INVALID] = "firmware-invalid", }; static void upload_err_inject_error(struct test_firmware_upload *tst, diff --git a/lib/test_fortify/write_overflow-strlcpy-src.c b/lib/test_fortify/write_overflow-strlcpy-src.c deleted file mode 100644 index 91bf83ebd34a..000000000000 --- a/lib/test_fortify/write_overflow-strlcpy-src.c +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#define TEST \ - strlcpy(small, large_src, sizeof(small) + 1) - -#include "test_fortify.h" diff --git a/lib/test_fortify/write_overflow-strlcpy.c b/lib/test_fortify/write_overflow-strlcpy.c deleted file mode 100644 index 1883db7c0cd6..000000000000 --- a/lib/test_fortify/write_overflow-strlcpy.c +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#define TEST \ - strlcpy(instance.buf, large_src, sizeof(instance.buf) + 1) - -#include "test_fortify.h" diff --git a/lib/test_ida.c b/lib/test_ida.c index b06880625961..072a49897e71 100644 --- a/lib/test_ida.c +++ b/lib/test_ida.c @@ -13,7 +13,7 @@ static unsigned int tests_run; static unsigned int tests_passed; #ifdef __KERNEL__ -void ida_dump(struct ida *ida) { } +static void ida_dump(struct ida *ida) { } #endif #define IDA_BUG_ON(ida, x) do { \ tests_run++; \ @@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida) IDA_BUG_ON(ida, !ida_is_empty(ida)); } +/* + * Check various situations where we attempt to free an ID we don't own. 
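The pairing that ida_check_bad_free() below exercises is that only IDs returned by ida_alloc*() may be passed to ida_free(); freeing anything else triggers the "not allocated" warning the test deliberately provokes. A kernel-context sketch of the normal pairing, using only the APIs shown in this hunk (the example_ida names are hypothetical):

static DEFINE_IDA(example_ida);

static int example_ida_usage(void)
{
	int id = ida_alloc_min(&example_ida, 3, GFP_KERNEL);

	if (id < 0)
		return id;			/* allocation failed */

	/* ... use id ... */

	ida_free(&example_ida, id);		/* valid: this ID was allocated above */
	/* ida_free(&example_ida, id + 1) would warn: never allocated */
	return 0;
}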
+ */ +static void ida_check_bad_free(struct ida *ida) +{ + unsigned long i; + + printk("vvv Ignore \"not allocated\" warnings\n"); + /* IDA is empty; all of these will fail */ + ida_free(ida, 0); + for (i = 0; i < 31; i++) + ida_free(ida, 1 << i); + + /* IDA contains a single value entry */ + IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3); + ida_free(ida, 0); + for (i = 0; i < 31; i++) + ida_free(ida, 1 << i); + + /* IDA contains a single bitmap */ + IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023); + ida_free(ida, 0); + for (i = 0; i < 31; i++) + ida_free(ida, 1 << i); + + /* IDA contains a tree */ + IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1); + ida_free(ida, 0); + for (i = 0; i < 31; i++) + ida_free(ida, 1 << i); + printk("^^^ \"not allocated\" warnings over\n"); + + ida_free(ida, 3); + ida_free(ida, 1023); + ida_free(ida, (1 << 20) - 1); + + IDA_BUG_ON(ida, !ida_is_empty(ida)); +} + static DEFINE_IDA(ida); static int ida_checks(void) @@ -162,6 +201,7 @@ static int ida_checks(void) ida_check_leaf(&ida, 1024 * 64); ida_check_max(&ida); ida_check_conv(&ida); + ida_check_bad_free(&ida); printk("IDA: %u of %u tests passed\n", tests_passed, tests_run); return (tests_run != tests_passed) ? 0 : -EINVAL; diff --git a/lib/test_kmod.c b/lib/test_kmod.c index 43d9dfd57ab7..1eec3b7ac67c 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -58,11 +58,14 @@ static int num_test_devs; * @need_mod_put for your tests case. */ enum kmod_test_case { + /* private: */ __TEST_KMOD_INVALID = 0, + /* public: */ TEST_KMOD_DRIVER, TEST_KMOD_FS_TYPE, + /* private: */ __TEST_KMOD_MAX, }; @@ -82,6 +85,7 @@ struct kmod_test_device; * @ret_sync: return value if request_module() is used, sync request for * @TEST_KMOD_DRIVER * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE + * @task_sync: kthread's task_struct or %NULL if not running * @thread_idx: thread ID * @test_dev: test device test is being performed under * @need_mod_put: Some tests (get_fs_type() is one) requires putting the module @@ -108,7 +112,7 @@ struct kmod_test_device_info { * @dev: pointer to misc_dev's own struct device * @config_mutex: protects configuration of test * @trigger_mutex: the test trigger can only be fired once at a time - * @thread_lock: protects @done count, and the @info per each thread + * @thread_mutex: protects @done count, and the @info per each thread * @done: number of threads which have completed or failed * @test_is_oom: when we run out of memory, use this to halt moving forward * @kthreads_done: completion used to signal when all work is done diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 464eeb90d5ad..399380db449c 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed; /* #define BENCH_NODE_STORE */ /* #define BENCH_AWALK */ /* #define BENCH_WALK */ +/* #define BENCH_LOAD */ /* #define BENCH_MT_FOR_EACH */ /* #define BENCH_FORK */ /* #define BENCH_MAS_FOR_EACH */ @@ -54,6 +55,11 @@ atomic_t maple_tree_tests_passed; #else #define cond_resched() do {} while (0) #endif + +#define mas_is_none(x) ((x)->status == ma_none) +#define mas_is_overflow(x) ((x)->status == ma_overflow) +#define mas_is_underflow(x) ((x)->status == ma_underflow) + static int __init mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp) { @@ -582,7 +588,7 @@ static noinline void __init check_find(struct maple_tree *mt) MT_BUG_ON(mt, last != mas.last); - mas.node = MAS_NONE; + mas.status = 
ma_none; mas.index = ULONG_MAX; mas.last = ULONG_MAX; entry2 = mas_prev(&mas, 0); @@ -1749,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt) } #endif +#if defined(BENCH_LOAD) +static noinline void __init bench_load(struct maple_tree *mt) +{ + int i, max = 2500, count = 550000000; + + for (i = 0; i < max; i += 10) + mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL); + + for (i = 0; i < count; i++) + mtree_load(mt, 1470); +} +#endif + #if defined(BENCH_MT_FOR_EACH) static noinline void __init bench_mt_for_each(struct maple_tree *mt) { @@ -1834,47 +1853,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt) } #endif /* check_forking - simulate the kernel forking sequence with the tree. */ -static noinline void __init check_forking(struct maple_tree *mt) +static noinline void __init check_forking(void) { - - struct maple_tree newmt; - int i, nr_entries = 134; + struct maple_tree mt, newmt; + int i, nr_entries = 134, ret; void *val; - MA_STATE(mas, mt, 0, 0); - MA_STATE(newmas, mt, 0, 0); - struct rw_semaphore newmt_lock; + MA_STATE(mas, &mt, 0, 0); + MA_STATE(newmas, &newmt, 0, 0); + struct rw_semaphore mt_lock, newmt_lock; + init_rwsem(&mt_lock); init_rwsem(&newmt_lock); - for (i = 0; i <= nr_entries; i++) - mtree_store_range(mt, i*10, i*10 + 5, - xa_mk_value(i), GFP_KERNEL); + mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); + mt_set_external_lock(&mt, &mt_lock); - mt_set_non_kernel(99999); mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); mt_set_external_lock(&newmt, &newmt_lock); - newmas.tree = &newmt; - mas_reset(&newmas); - mas_reset(&mas); - down_write(&newmt_lock); - mas.index = 0; - mas.last = 0; - if (mas_expected_entries(&newmas, nr_entries)) { + + down_write(&mt_lock); + for (i = 0; i <= nr_entries; i++) { + mas_set_range(&mas, i*10, i*10 + 5); + mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL); + } + + down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING); + ret = __mt_dup(&mt, &newmt, GFP_KERNEL); + if (ret) { pr_err("OOM!"); BUG_ON(1); } - rcu_read_lock(); - mas_for_each(&mas, val, ULONG_MAX) { - newmas.index = mas.index; - newmas.last = mas.last; + + mas_set(&newmas, 0); + mas_for_each(&newmas, val, ULONG_MAX) mas_store(&newmas, val); - } - rcu_read_unlock(); + mas_destroy(&newmas); + mas_destroy(&mas); mt_validate(&newmt); - mt_set_non_kernel(0); __mt_destroy(&newmt); + __mt_destroy(&mt); up_write(&newmt_lock); + up_write(&mt_lock); } static noinline void __init check_iteration(struct maple_tree *mt) @@ -1977,49 +1997,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt) } #if defined(BENCH_FORK) -static noinline void __init bench_forking(struct maple_tree *mt) +static noinline void __init bench_forking(void) { - - struct maple_tree newmt; - int i, nr_entries = 134, nr_fork = 80000; + struct maple_tree mt, newmt; + int i, nr_entries = 134, nr_fork = 80000, ret; void *val; - MA_STATE(mas, mt, 0, 0); - MA_STATE(newmas, mt, 0, 0); - struct rw_semaphore newmt_lock; + MA_STATE(mas, &mt, 0, 0); + MA_STATE(newmas, &newmt, 0, 0); + struct rw_semaphore mt_lock, newmt_lock; + init_rwsem(&mt_lock); init_rwsem(&newmt_lock); - mt_set_external_lock(&newmt, &newmt_lock); - for (i = 0; i <= nr_entries; i++) - mtree_store_range(mt, i*10, i*10 + 5, - xa_mk_value(i), GFP_KERNEL); + mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); + mt_set_external_lock(&mt, &mt_lock); + + down_write(&mt_lock); + for (i = 0; i <= nr_entries; i++) { + mas_set_range(&mas, i*10, i*10 + 5); + 
mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL); + } for (i = 0; i < nr_fork; i++) { - mt_set_non_kernel(99999); - mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); - newmas.tree = &newmt; - mas_reset(&newmas); - mas_reset(&mas); - mas.index = 0; - mas.last = 0; - rcu_read_lock(); - down_write(&newmt_lock); - if (mas_expected_entries(&newmas, nr_entries)) { - printk("OOM!"); + mt_init_flags(&newmt, + MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); + mt_set_external_lock(&newmt, &newmt_lock); + + down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING); + ret = __mt_dup(&mt, &newmt, GFP_KERNEL); + if (ret) { + pr_err("OOM!"); BUG_ON(1); } - mas_for_each(&mas, val, ULONG_MAX) { - newmas.index = mas.index; - newmas.last = mas.last; + + mas_set(&newmas, 0); + mas_for_each(&newmas, val, ULONG_MAX) mas_store(&newmas, val); - } + mas_destroy(&newmas); - rcu_read_unlock(); mt_validate(&newmt); - mt_set_non_kernel(0); __mt_destroy(&newmt); up_write(&newmt_lock); } + mas_destroy(&mas); + __mt_destroy(&mt); + up_write(&mt_lock); } #endif @@ -2175,7 +2197,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt) MT_BUG_ON(mt, val != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 5); - MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); mas.index = 0; mas.last = 5; @@ -3039,10 +3061,6 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt) * DNE active active range of NULL */ -#define mas_active(x) (((x).node != MAS_ROOT) && \ - ((x).node != MAS_START) && \ - ((x).node != MAS_PAUSE) && \ - ((x).node != MAS_NONE)) static noinline void __init check_state_handling(struct maple_tree *mt) { MA_STATE(mas, mt, 0, 0); @@ -3057,7 +3075,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) /* prev: Start -> underflow*/ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != NULL); - MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + MT_BUG_ON(mt, mas.status != ma_underflow); /* prev: Start -> root */ mas_set(&mas, 10); @@ -3065,7 +3083,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* prev: pause -> root */ mas_set(&mas, 10); @@ -3074,7 +3092,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* next: start -> none */ mas_set(&mas, 0); @@ -3082,7 +3100,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); MT_BUG_ON(mt, entry != NULL); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* next: start -> none*/ mas_set(&mas, 10); @@ -3090,7 +3108,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); MT_BUG_ON(mt, entry != NULL); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* find: start -> root */ mas_set(&mas, 0); @@ -3098,21 +3116,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* find: root -> none */ entry 
= mas_find(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* find: none -> none */ entry = mas_find(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* find: start -> none */ mas_set(&mas, 10); @@ -3120,14 +3138,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* find_rev: none -> root */ entry = mas_find_rev(&mas, 0); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* find_rev: start -> root */ mas_set(&mas, 0); @@ -3135,21 +3153,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* find_rev: root -> none */ entry = mas_find_rev(&mas, 0); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* find_rev: none -> none */ entry = mas_find_rev(&mas, 0); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* find_rev: start -> root */ mas_set(&mas, 10); @@ -3157,7 +3175,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* walk: start -> none */ mas_set(&mas, 10); @@ -3165,7 +3183,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* walk: pause -> none*/ mas_set(&mas, 10); @@ -3174,7 +3192,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* walk: none -> none */ mas.index = mas.last = 10; @@ -3182,14 +3200,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* walk: none -> none */ entry = mas_walk(&mas); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* walk: start -> root */ mas_set(&mas, 0); @@ -3197,7 +3215,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, 
mas.status != ma_root); /* walk: pause -> root */ mas_set(&mas, 0); @@ -3206,22 +3224,22 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* walk: none -> root */ - mas.node = MAS_NONE; + mas.status = ma_none; entry = mas_walk(&mas); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* walk: root -> root */ entry = mas_walk(&mas); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); /* walk: root -> none */ mas_set(&mas, 10); @@ -3229,7 +3247,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 1); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_NONE); + MT_BUG_ON(mt, mas.status != ma_none); /* walk: none -> root */ mas.index = mas.last = 0; @@ -3237,7 +3255,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0); - MT_BUG_ON(mt, mas.node != MAS_ROOT); + MT_BUG_ON(mt, mas.status != ma_root); mas_unlock(&mas); @@ -3255,7 +3273,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* next: pause ->active */ mas_set(&mas, 0); @@ -3264,126 +3282,132 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* next: none ->active */ mas.index = mas.last = 0; mas.offset = 0; - mas.node = MAS_NONE; + mas.status = ma_none; entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); - /* next:active ->active */ - entry = mas_next(&mas, ULONG_MAX); + /* next:active ->active (spanning limit) */ + entry = mas_next(&mas, 0x2100); MT_BUG_ON(mt, entry != ptr2); MT_BUG_ON(mt, mas.index != 0x2000); MT_BUG_ON(mt, mas.last != 0x2500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); - /* next:active -> active beyond data */ + /* next:active -> overflow (limit reached) beyond data */ entry = mas_next(&mas, 0x2999); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x2501); MT_BUG_ON(mt, mas.last != 0x2fff); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_overflow(&mas)); - /* Continue after last range ends after max */ + /* next:overflow -> active (limit changed) */ entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != ptr3); MT_BUG_ON(mt, mas.index != 0x3000); MT_BUG_ON(mt, mas.last != 0x3500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); - /* next:active -> active continued */ + /* next:active -> overflow (limit reached) */ entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x3501); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, 
!mas_active(mas)); - - /* next:active -> overflow */ - entry = mas_next(&mas, ULONG_MAX); - MT_BUG_ON(mt, entry != NULL); - MT_BUG_ON(mt, mas.index != 0x3501); - MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_OVERFLOW); + MT_BUG_ON(mt, !mas_is_overflow(&mas)); /* next:overflow -> overflow */ entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x3501); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, mas.node != MAS_OVERFLOW); + MT_BUG_ON(mt, !mas_is_overflow(&mas)); /* prev:overflow -> active */ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != ptr3); MT_BUG_ON(mt, mas.index != 0x3000); MT_BUG_ON(mt, mas.last != 0x3500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* next: none -> active, skip value at location */ mas_set(&mas, 0); entry = mas_next(&mas, ULONG_MAX); - mas.node = MAS_NONE; + mas.status = ma_none; mas.offset = 0; entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != ptr2); MT_BUG_ON(mt, mas.index != 0x2000); MT_BUG_ON(mt, mas.last != 0x2500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* prev:active ->active */ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); - /* prev:active -> active spanning end range */ + /* prev:active -> underflow (span limit) */ + mas_next(&mas, ULONG_MAX); + entry = mas_prev(&mas, 0x1200); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* spanning limit */ + entry = mas_prev(&mas, 0x1200); /* underflow */ + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); + + /* prev:underflow -> underflow (lower limit) spanning end range */ entry = mas_prev(&mas, 0x0100); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0x0FFF); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); - /* prev:active -> underflow */ + /* prev:underflow -> underflow */ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0x0FFF); - MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); /* prev:underflow -> underflow */ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0x0FFF); - MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); /* next:underflow -> active */ entry = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* prev:first value -> underflow */ entry = mas_prev(&mas, 0x1000); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); /* find:underflow -> first value */ entry = mas_find(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* prev: pause ->active */ mas_set(&mas, 0x3600); @@ -3394,21 +3418,21 @@ static noinline void __init 
check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr2); MT_BUG_ON(mt, mas.index != 0x2000); MT_BUG_ON(mt, mas.last != 0x2500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); - /* prev:active -> active spanning min */ + /* prev:active -> underflow spanning min */ entry = mas_prev(&mas, 0x1600); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x1501); MT_BUG_ON(mt, mas.last != 0x1FFF); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); /* prev: active ->active, continue */ entry = mas_prev(&mas, 0); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find: start ->active */ mas_set(&mas, 0); @@ -3416,7 +3440,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find: pause ->active */ mas_set(&mas, 0); @@ -3425,7 +3449,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find: start ->active on value */; mas_set(&mas, 1200); @@ -3433,14 +3457,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find:active ->active */ entry = mas_find(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != ptr2); MT_BUG_ON(mt, mas.index != 0x2000); MT_BUG_ON(mt, mas.last != 0x2500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find:active -> active (NULL)*/ @@ -3448,35 +3472,35 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x2501); MT_BUG_ON(mt, mas.last != 0x2FFF); - MT_BUG_ON(mt, !mas_active(mas)); + MAS_BUG_ON(&mas, !mas_is_active(&mas)); /* find: overflow ->active */ entry = mas_find(&mas, 0x5000); MT_BUG_ON(mt, entry != ptr3); MT_BUG_ON(mt, mas.index != 0x3000); MT_BUG_ON(mt, mas.last != 0x3500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find:active -> active (NULL) end*/ entry = mas_find(&mas, ULONG_MAX); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x3501); MT_BUG_ON(mt, mas.last != ULONG_MAX); - MT_BUG_ON(mt, !mas_active(mas)); + MAS_BUG_ON(&mas, !mas_is_active(&mas)); /* find_rev: active (END) ->active */ entry = mas_find_rev(&mas, 0); MT_BUG_ON(mt, entry != ptr3); MT_BUG_ON(mt, mas.index != 0x3000); MT_BUG_ON(mt, mas.last != 0x3500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find_rev:active ->active */ entry = mas_find_rev(&mas, 0); MT_BUG_ON(mt, entry != ptr2); MT_BUG_ON(mt, mas.index != 0x2000); MT_BUG_ON(mt, mas.last != 0x2500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* find_rev: pause ->active */ mas_pause(&mas); @@ -3484,14 +3508,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); - /* 
find_rev:active -> active */ + /* find_rev:active -> underflow */ entry = mas_find_rev(&mas, 0); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != 0x0FFF); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_underflow(&mas)); /* find_rev: start ->active */ mas_set(&mas, 0x1200); @@ -3499,7 +3523,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk start ->active */ mas_set(&mas, 0x1200); @@ -3507,7 +3531,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk start ->active */ mas_set(&mas, 0x1600); @@ -3515,7 +3539,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x1501); MT_BUG_ON(mt, mas.last != 0x1fff); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk pause ->active */ mas_set(&mas, 0x1200); @@ -3524,7 +3548,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk pause -> active */ mas_set(&mas, 0x1600); @@ -3533,25 +3557,25 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x1501); MT_BUG_ON(mt, mas.last != 0x1fff); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk none -> active */ mas_set(&mas, 0x1200); - mas.node = MAS_NONE; + mas.status = ma_none; entry = mas_walk(&mas); MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk none -> active */ mas_set(&mas, 0x1600); - mas.node = MAS_NONE; + mas.status = ma_none; entry = mas_walk(&mas); MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x1501); MT_BUG_ON(mt, mas.last != 0x1fff); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk active -> active */ mas.index = 0x1200; @@ -3561,7 +3585,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != ptr); MT_BUG_ON(mt, mas.index != 0x1000); MT_BUG_ON(mt, mas.last != 0x1500); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); /* mas_walk active -> active */ mas.index = 0x1600; @@ -3570,11 +3594,50 @@ static noinline void __init check_state_handling(struct maple_tree *mt) MT_BUG_ON(mt, entry != NULL); MT_BUG_ON(mt, mas.index != 0x1501); MT_BUG_ON(mt, mas.last != 0x1fff); - MT_BUG_ON(mt, !mas_active(mas)); + MT_BUG_ON(mt, !mas_is_active(&mas)); mas_unlock(&mas); } +static noinline void __init alloc_cyclic_testing(struct maple_tree *mt) +{ + unsigned long location; + unsigned long next; + int ret = 0; + MA_STATE(mas, mt, 0, 0); + + next = 0; + mtree_lock(mt); + for (int i = 0; i < 100; i++) { + mas_alloc_cyclic(&mas, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MAS_BUG_ON(&mas, i != location - 2); + MAS_BUG_ON(&mas, mas.index != location); + MAS_BUG_ON(&mas, 
mas.last != location); + MAS_BUG_ON(&mas, i != next - 3); + } + + mtree_unlock(mt); + mtree_destroy(mt); + next = 0; + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (int i = 0; i < 100; i++) { + mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, i != location - 2); + MT_BUG_ON(mt, i != next - 3); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + } + + mtree_destroy(mt); + /* Overflow test */ + next = ULONG_MAX - 1; + ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, ret != 0); + ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, ret != 0); + ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, ret != 1); +} + static DEFINE_MTREE(tree); static int __init maple_tree_seed(void) { @@ -3585,10 +3648,6 @@ static int __init maple_tree_seed(void) pr_info("\nTEST STARTING\n\n"); - mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); - check_root_expand(&tree); - mtree_destroy(&tree); - #if defined(BENCH_SLOT_STORE) #define BENCH mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); @@ -3617,13 +3676,18 @@ static int __init maple_tree_seed(void) mtree_destroy(&tree); goto skip; #endif -#if defined(BENCH_FORK) +#if defined(BENCH_LOAD) #define BENCH mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); - bench_forking(&tree); + bench_load(&tree); mtree_destroy(&tree); goto skip; #endif +#if defined(BENCH_FORK) +#define BENCH + bench_forking(); + goto skip; +#endif #if defined(BENCH_MT_FOR_EACH) #define BENCH mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); @@ -3647,13 +3711,15 @@ static int __init maple_tree_seed(void) #endif mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); - check_iteration(&tree); + check_root_expand(&tree); mtree_destroy(&tree); mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); - check_forking(&tree); + check_iteration(&tree); mtree_destroy(&tree); + check_forking(); + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); check_mas_store_gfp(&tree); mtree_destroy(&tree); @@ -3853,6 +3919,11 @@ static int __init maple_tree_seed(void) check_state_handling(&tree); mtree_destroy(&tree); + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + alloc_cyclic_testing(&tree); + mtree_destroy(&tree); + + #if defined(BENCH) skip: #endif diff --git a/lib/test_meminit.c b/lib/test_meminit.c index 0ae35223d773..0dc173849a54 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures) int failures = 0, num_tests = 0; int i; - for (i = 0; i <= MAX_ORDER; i++) + for (i = 0; i < NR_PAGE_ORDERS; i++) num_tests += do_alloc_pages_order(i, &failures); REPORT_FAILURES_IN_FN(); diff --git a/lib/test_objpool.c b/lib/test_objpool.c index a94078402138..bfdb81599832 100644 --- a/lib/test_objpool.c +++ b/lib/test_objpool.c @@ -311,7 +311,7 @@ static void ot_fini_sync(struct ot_context *sop) ot_kfree(sop->test, sop, sizeof(*sop)); } -struct { +static struct { struct ot_context * (*init)(struct ot_test *oc); void (*fini)(struct ot_context *sop); } g_ot_sync_ops[] = { @@ -475,7 +475,7 @@ static struct ot_context *ot_init_async_m0(struct ot_test *test) return sop; } -struct { +static struct { struct ot_context * (*init)(struct ot_test *oc); void (*fini)(struct ot_context *sop); } g_ot_async_ops[] = { @@ -632,7 +632,7 @@ static int ot_start_async(struct ot_test *test) #define NODE_COMPACT sizeof(struct ot_node) #define NODE_VMALLOC (512) -struct ot_test g_testcases[] = { +static struct ot_test g_testcases[] = { /* sync & normal */ {0, 0, NODE_COMPACT, 
1000, 0, 1, 0, 0, "sync: percpu objpool"}, diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index c20f6cb4bf55..42b585208249 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -16,6 +16,7 @@ #include <linux/kthread.h> #include <linux/module.h> #include <linux/rcupdate.h> +#include <linux/rcupdate_wait.h> #include <linux/rhashtable.h> #include <linux/slab.h> #include <linux/sched.h> diff --git a/lib/test_string.c b/lib/test_string.c deleted file mode 100644 index c5cb92fb710e..000000000000 --- a/lib/test_string.c +++ /dev/null @@ -1,257 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#include <linux/module.h> -#include <linux/printk.h> -#include <linux/slab.h> -#include <linux/string.h> - -static __init int memset16_selftest(void) -{ - unsigned i, j, k; - u16 v, *p; - - p = kmalloc(256 * 2 * 2, GFP_KERNEL); - if (!p) - return -1; - - for (i = 0; i < 256; i++) { - for (j = 0; j < 256; j++) { - memset(p, 0xa1, 256 * 2 * sizeof(v)); - memset16(p + i, 0xb1b2, j); - for (k = 0; k < 512; k++) { - v = p[k]; - if (k < i) { - if (v != 0xa1a1) - goto fail; - } else if (k < i + j) { - if (v != 0xb1b2) - goto fail; - } else { - if (v != 0xa1a1) - goto fail; - } - } - } - } - -fail: - kfree(p); - if (i < 256) - return (i << 24) | (j << 16) | k | 0x8000; - return 0; -} - -static __init int memset32_selftest(void) -{ - unsigned i, j, k; - u32 v, *p; - - p = kmalloc(256 * 2 * 4, GFP_KERNEL); - if (!p) - return -1; - - for (i = 0; i < 256; i++) { - for (j = 0; j < 256; j++) { - memset(p, 0xa1, 256 * 2 * sizeof(v)); - memset32(p + i, 0xb1b2b3b4, j); - for (k = 0; k < 512; k++) { - v = p[k]; - if (k < i) { - if (v != 0xa1a1a1a1) - goto fail; - } else if (k < i + j) { - if (v != 0xb1b2b3b4) - goto fail; - } else { - if (v != 0xa1a1a1a1) - goto fail; - } - } - } - } - -fail: - kfree(p); - if (i < 256) - return (i << 24) | (j << 16) | k | 0x8000; - return 0; -} - -static __init int memset64_selftest(void) -{ - unsigned i, j, k; - u64 v, *p; - - p = kmalloc(256 * 2 * 8, GFP_KERNEL); - if (!p) - return -1; - - for (i = 0; i < 256; i++) { - for (j = 0; j < 256; j++) { - memset(p, 0xa1, 256 * 2 * sizeof(v)); - memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j); - for (k = 0; k < 512; k++) { - v = p[k]; - if (k < i) { - if (v != 0xa1a1a1a1a1a1a1a1ULL) - goto fail; - } else if (k < i + j) { - if (v != 0xb1b2b3b4b5b6b7b8ULL) - goto fail; - } else { - if (v != 0xa1a1a1a1a1a1a1a1ULL) - goto fail; - } - } - } - } - -fail: - kfree(p); - if (i < 256) - return (i << 24) | (j << 16) | k | 0x8000; - return 0; -} - -static __init int strchr_selftest(void) -{ - const char *test_string = "abcdefghijkl"; - const char *empty_string = ""; - char *result; - int i; - - for (i = 0; i < strlen(test_string) + 1; i++) { - result = strchr(test_string, test_string[i]); - if (result - test_string != i) - return i + 'a'; - } - - result = strchr(empty_string, '\0'); - if (result != empty_string) - return 0x101; - - result = strchr(empty_string, 'a'); - if (result) - return 0x102; - - result = strchr(test_string, 'z'); - if (result) - return 0x103; - - return 0; -} - -static __init int strnchr_selftest(void) -{ - const char *test_string = "abcdefghijkl"; - const char *empty_string = ""; - char *result; - int i, j; - - for (i = 0; i < strlen(test_string) + 1; i++) { - for (j = 0; j < strlen(test_string) + 2; j++) { - result = strnchr(test_string, j, test_string[i]); - if (j <= i) { - if (!result) - continue; - return ((i + 'a') << 8) | j; - } - if (result - test_string != i) - return ((i + 'a') << 8) | j; - } - } - - result = 
strnchr(empty_string, 0, '\0'); - if (result) - return 0x10001; - - result = strnchr(empty_string, 1, '\0'); - if (result != empty_string) - return 0x10002; - - result = strnchr(empty_string, 1, 'a'); - if (result) - return 0x10003; - - result = strnchr(NULL, 0, '\0'); - if (result) - return 0x10004; - - return 0; -} - -static __init int strspn_selftest(void) -{ - static const struct strspn_test { - const char str[16]; - const char accept[16]; - const char reject[16]; - unsigned a; - unsigned r; - } tests[] __initconst = { - { "foobar", "", "", 0, 6 }, - { "abba", "abc", "ABBA", 4, 4 }, - { "abba", "a", "b", 1, 1 }, - { "", "abc", "abc", 0, 0}, - }; - const struct strspn_test *s = tests; - size_t i, res; - - for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) { - res = strspn(s->str, s->accept); - if (res != s->a) - return 0x100 + 2*i; - res = strcspn(s->str, s->reject); - if (res != s->r) - return 0x100 + 2*i + 1; - } - return 0; -} - -static __exit void string_selftest_remove(void) -{ -} - -static __init int string_selftest_init(void) -{ - int test, subtest; - - test = 1; - subtest = memset16_selftest(); - if (subtest) - goto fail; - - test = 2; - subtest = memset32_selftest(); - if (subtest) - goto fail; - - test = 3; - subtest = memset64_selftest(); - if (subtest) - goto fail; - - test = 4; - subtest = strchr_selftest(); - if (subtest) - goto fail; - - test = 5; - subtest = strnchr_selftest(); - if (subtest) - goto fail; - - test = 6; - subtest = strspn_selftest(); - if (subtest) - goto fail; - - pr_info("String selftests succeeded\n"); - return 0; -fail: - pr_crit("String selftest failure %d.%08x\n", test, subtest); - return 0; -} - -module_init(string_selftest_init); -module_exit(string_selftest_remove); -MODULE_LICENSE("GPL v2"); diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index 8036aa91a1cb..9321d850931f 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -35,6 +35,8 @@ static struct { struct ctl_table_header *test_h_setup_node; struct ctl_table_header *test_h_mnt; struct ctl_table_header *test_h_mnterror; + struct ctl_table_header *empty_add; + struct ctl_table_header *empty; } sysctl_test_headers; struct test_sysctl_data { @@ -130,7 +132,6 @@ static struct ctl_table test_table[] = { .mode = 0644, .proc_handler = proc_do_large_bitmap, }, - { } }; static void test_sysctl_calc_match_int_ok(void) @@ -184,7 +185,6 @@ static struct ctl_table test_table_unregister[] = { .mode = 0644, .proc_handler = proc_dointvec_minmax, }, - {} }; static int test_sysctl_run_unregister_nested(void) @@ -220,6 +220,25 @@ static int test_sysctl_run_register_mount_point(void) return 0; } +static struct ctl_table test_table_empty[] = { }; + +static int test_sysctl_run_register_empty(void) +{ + /* Tets that an empty dir can be created */ + sysctl_test_headers.empty_add + = register_sysctl("debug/test_sysctl/empty_add", test_table_empty); + if (!sysctl_test_headers.empty_add) + return -ENOMEM; + + /* Test that register on top of an empty dir works */ + sysctl_test_headers.empty + = register_sysctl("debug/test_sysctl/empty_add/empty", test_table_empty); + if (!sysctl_test_headers.empty) + return -ENOMEM; + + return 0; +} + static int __init test_sysctl_init(void) { int err; @@ -233,6 +252,10 @@ static int __init test_sysctl_init(void) goto out; err = test_sysctl_run_register_mount_point(); + if (err) + goto out; + + err = test_sysctl_run_register_empty(); out: return err; @@ -248,6 +271,10 @@ static void __exit test_sysctl_exit(void) unregister_sysctl_table(sysctl_test_headers.test_h_mnt); if 
(sysctl_test_headers.test_h_mnterror) unregister_sysctl_table(sysctl_test_headers.test_h_mnterror); + if (sysctl_test_headers.empty) + unregister_sysctl_table(sysctl_test_headers.empty); + if (sysctl_test_headers.empty_add) + unregister_sysctl_table(sysctl_test_headers.empty_add); } module_exit(test_sysctl_exit); diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c index 2062be1f2e80..c288df9372ed 100644 --- a/lib/test_ubsan.c +++ b/lib/test_ubsan.c @@ -11,6 +11,39 @@ typedef void(*test_ubsan_fp)(void); #config, IS_ENABLED(config) ? "y" : "n"); \ } while (0) +static void test_ubsan_add_overflow(void) +{ + volatile int val = INT_MAX; + + UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP); + val += 2; +} + +static void test_ubsan_sub_overflow(void) +{ + volatile int val = INT_MIN; + volatile int val2 = 2; + + UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP); + val -= val2; +} + +static void test_ubsan_mul_overflow(void) +{ + volatile int val = INT_MAX / 2; + + UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP); + val *= 3; +} + +static void test_ubsan_negate_overflow(void) +{ + volatile int val = INT_MIN; + + UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP); + val = -val; +} + static void test_ubsan_divrem_overflow(void) { volatile int val = 16; @@ -23,8 +56,8 @@ static void test_ubsan_divrem_overflow(void) static void test_ubsan_shift_out_of_bounds(void) { volatile int neg = -1, wrap = 4; - int val1 = 10; - int val2 = INT_MAX; + volatile int val1 = 10; + volatile int val2 = INT_MAX; UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent"); val1 <<= neg; @@ -90,6 +123,10 @@ static void test_ubsan_misaligned_access(void) } static const test_ubsan_fp test_ubsan_array[] = { + test_ubsan_add_overflow, + test_ubsan_sub_overflow, + test_ubsan_mul_overflow, + test_ubsan_negate_overflow, test_ubsan_shift_out_of_bounds, test_ubsan_out_of_bounds, test_ubsan_load_invalid_value, @@ -97,7 +134,7 @@ static const test_ubsan_fp test_ubsan_array[] = { }; /* Excluded because they Oops the module. */ -static const test_ubsan_fp skip_ubsan_array[] = { +static __used const test_ubsan_fp skip_ubsan_array[] = { test_ubsan_divrem_overflow, }; diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 3718d9886407..4ddf769861ff 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -117,7 +117,7 @@ static int align_shift_alloc_test(void) int i; for (i = 0; i < BITS_PER_LONG; i++) { - align = ((unsigned long) 1) << i; + align = 1UL << i; ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0, __builtin_return_address(0)); @@ -501,7 +501,7 @@ static int test_func(void *private) } static int -init_test_configurtion(void) +init_test_configuration(void) { /* * A maximum number of workers is defined as hard-coded @@ -531,7 +531,7 @@ static void do_concurrent_test(void) /* * Set some basic configurations plus sanity check. 
*/ - ret = init_test_configurtion(); + ret = init_test_configuration(); if (ret < 0) return; @@ -600,12 +600,7 @@ static int vmalloc_test_init(void) return -EAGAIN; /* Fail will directly unload the module */ } -static void vmalloc_test_exit(void) -{ -} - module_init(vmalloc_test_init) -module_exit(vmalloc_test_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Uladzislau Rezki"); diff --git a/lib/test_xarray.c b/lib/test_xarray.c index e77d4856442c..928fc20337e6 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -423,6 +423,59 @@ static noinline void check_cmpxchg(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_cmpxchg_order(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + void *FIVE = xa_mk_value(5); + unsigned int i, order = 3; + + XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL)); + + /* Check entry FIVE has the order saved */ + XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order); + + /* Check all the tied indexes have the same entry and order */ + for (i = 0; i < (1 << order); i++) { + XA_BUG_ON(xa, xa_load(xa, i) != FIVE); + XA_BUG_ON(xa, xa_get_order(xa, i) != order); + } + + /* Ensure that nothing is stored at index '1 << order' */ + XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL); + + /* + * Additionally, keep the node information and the order at + * '1 << order' + */ + XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL)); + for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) { + XA_BUG_ON(xa, xa_load(xa, i) != FIVE); + XA_BUG_ON(xa, xa_get_order(xa, i) != order); + } + + /* Conditionally replace FIVE entry at index '0' with NULL */ + XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE); + + /* Verify the order is lost at FIVE (and old) entries */ + XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0); + + /* Verify the order and entries are lost in all the tied indexes */ + for (i = 0; i < (1 << order); i++) { + XA_BUG_ON(xa, xa_load(xa, i) != NULL); + XA_BUG_ON(xa, xa_get_order(xa, i) != 0); + } + + /* Verify node and order are kept at '1 << order' */ + for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) { + XA_BUG_ON(xa, xa_load(xa, i) != FIVE); + XA_BUG_ON(xa, xa_get_order(xa, i) != order); + } + + xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); + XA_BUG_ON(xa, !xa_empty(xa)); +#endif +} + static noinline void check_reserve(struct xarray *xa) { void *entry; @@ -674,6 +727,186 @@ static noinline void check_multi_store(struct xarray *xa) #endif } +#ifdef CONFIG_XARRAY_MULTI +/* mimics page cache __filemap_add_folio() */ +static noinline void check_xa_multi_store_adv_add(struct xarray *xa, + unsigned long index, + unsigned int order, + void *p) +{ + XA_STATE(xas, xa, index); + unsigned int nrpages = 1UL << order; + + /* users are responsible for index alignemnt to the order when adding */ + XA_BUG_ON(xa, index & (nrpages - 1)); + + xas_set_order(&xas, index, order); + + do { + xas_lock_irq(&xas); + xas_store(&xas, p); + xas_unlock_irq(&xas); + /* + * In our selftest case the only failure we can expect is for + * there not to be enough memory as we're not mimicking the + * entire page cache, so verify that's the only error we can run + * into here. The xas_nomem() which follows will ensure to fix + * that condition for us so to chug on on the loop. 
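The alignment rule stated in the comment above, and implemented by the round_down() calls in check_xa_multi_store_adv(), is plain arithmetic: an order-N entry covers 1 << N consecutive indices and must start at a multiple of that span. A small, runnable illustration (userspace, not the kernel macros):

#include <assert.h>

int main(void)
{
	unsigned int order = 3;				/* 8 consecutive indices  */
	unsigned long nrpages = 1UL << order;
	unsigned long index = 157;			/* arbitrary position     */
	unsigned long base = index & ~(nrpages - 1);	/* equivalent of round_down(index, nrpages) */

	assert((base & (nrpages - 1)) == 0);		/* aligned to the order   */
	assert(base <= index && index < base + nrpages);/* base covers the index  */
	return 0;
}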
+ */ + XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM); + } while (xas_nomem(&xas, GFP_KERNEL)); + + XA_BUG_ON(xa, xas_error(&xas)); + XA_BUG_ON(xa, xa_load(xa, index) != p); +} + +/* mimics page_cache_delete() */ +static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, + unsigned long index, + unsigned int order) +{ + XA_STATE(xas, xa, index); + + xas_set_order(&xas, index, order); + xas_store(&xas, NULL); + xas_init_marks(&xas); +} + +static noinline void check_xa_multi_store_adv_delete(struct xarray *xa, + unsigned long index, + unsigned int order) +{ + xa_lock_irq(xa); + check_xa_multi_store_adv_del_entry(xa, index, order); + xa_unlock_irq(xa); +} + +/* mimics page cache filemap_get_entry() */ +static noinline void *test_get_entry(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + void *p; + static unsigned int loops = 0; + + rcu_read_lock(); +repeat: + xas_reset(&xas); + p = xas_load(&xas); + if (xas_retry(&xas, p)) + goto repeat; + rcu_read_unlock(); + + /* + * This is not part of the page cache, this selftest is pretty + * aggressive and does not want to trust the xarray API but rather + * test it, and for order 20 (4 GiB block size) we can loop over + * over a million entries which can cause a soft lockup. Page cache + * APIs won't be stupid, proper page cache APIs loop over the proper + * order so when using a larger order we skip shared entries. + */ + if (++loops % XA_CHECK_SCHED == 0) + schedule(); + + return p; +} + +static unsigned long some_val = 0xdeadbeef; +static unsigned long some_val_2 = 0xdeaddead; + +/* mimics the page cache usage */ +static noinline void check_xa_multi_store_adv(struct xarray *xa, + unsigned long pos, + unsigned int order) +{ + unsigned int nrpages = 1UL << order; + unsigned long index, base, next_index, next_next_index; + unsigned int i; + + index = pos >> PAGE_SHIFT; + base = round_down(index, nrpages); + next_index = round_down(base + nrpages, nrpages); + next_next_index = round_down(next_index + nrpages, nrpages); + + check_xa_multi_store_adv_add(xa, base, order, &some_val); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val); + + XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL); + + /* Use order 0 for the next item */ + check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2); + XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2); + + /* Remove the next item */ + check_xa_multi_store_adv_delete(xa, next_index, 0); + + /* Now use order for a new pointer */ + check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2); + + check_xa_multi_store_adv_delete(xa, next_index, order); + check_xa_multi_store_adv_delete(xa, base, order); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* starting fresh again */ + + /* let's test some holes now */ + + /* hole at base and next_next */ + check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL); + + check_xa_multi_store_adv_delete(xa, next_index, order); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* hole at base and next */ + + check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2); + + for (i = 0; i < 
nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); + + check_xa_multi_store_adv_delete(xa, next_next_index, order); + XA_BUG_ON(xa, !xa_empty(xa)); +} +#endif + +static noinline void check_multi_store_advanced(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; + unsigned long end = ULONG_MAX/2; + unsigned long pos, i; + + /* + * About 117 million tests below. + */ + for (pos = 7; pos < end; pos = (pos * pos) + 564) { + for (i = 0; i < max_order; i++) { + check_xa_multi_store_adv(xa, pos, i); + check_xa_multi_store_adv(xa, pos + 157, i); + } + } +#endif +} + static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) { int i; @@ -1555,9 +1788,11 @@ static void check_split_1(struct xarray *xa, unsigned long index, unsigned int order, unsigned int new_order) { XA_STATE_ORDER(xas, xa, index, new_order); - unsigned int i; + unsigned int i, found; + void *entry; xa_store_order(xa, index, order, xa, GFP_KERNEL); + xa_set_mark(xa, index, XA_MARK_1); xas_split_alloc(&xas, xa, order, GFP_KERNEL); xas_lock(&xas); @@ -1574,6 +1809,16 @@ static void check_split_1(struct xarray *xa, unsigned long index, xa_set_mark(xa, index, XA_MARK_0); XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); + xas_set_order(&xas, index, 0); + found = 0; + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) { + found++; + XA_BUG_ON(xa, xa_is_internal(entry)); + } + rcu_read_unlock(); + XA_BUG_ON(xa, found != 1 << (order - new_order)); + xa_destroy(xa); } @@ -1801,9 +2046,11 @@ static int xarray_checks(void) check_xas_erase(&array); check_insert(&array); check_cmpxchg(&array); + check_cmpxchg_order(&array); check_reserve(&array); check_reserve(&xa0); check_multi_store(&array); + check_multi_store_advanced(&array); check_get_order(&array); check_xa_alloc(); check_find(&array); diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c index 62b4e8b3c733..a94cd56a1e4c 100644 --- a/lib/trace_readwrite.c +++ b/lib/trace_readwrite.c @@ -7,7 +7,7 @@ #include <linux/ftrace.h> #include <linux/module.h> -#include <asm-generic/io.h> +#include <linux/io.h> #define CREATE_TRACE_POINTS #include <trace/events/rwmmio.h> diff --git a/lib/ubsan.c b/lib/ubsan.c index 3f90810f9f42..a1c983d148f1 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -44,9 +44,10 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type) case ubsan_shift_out_of_bounds: return "UBSAN: shift out of bounds"; #endif -#ifdef CONFIG_UBSAN_DIV_ZERO +#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP) /* - * SanitizerKind::IntegerDivideByZero emits + * SanitizerKind::IntegerDivideByZero and + * SanitizerKind::SignedIntegerOverflow emit * SanitizerHandler::DivremOverflow. */ case ubsan_divrem_overflow: @@ -78,6 +79,19 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type) case ubsan_type_mismatch: return "UBSAN: type mismatch"; #endif +#ifdef CONFIG_UBSAN_SIGNED_WRAP + /* + * SanitizerKind::SignedIntegerOverflow emits + * SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow, + * or SanitizerHandler::MulOverflow. 
+ */ + case ubsan_add_overflow: + return "UBSAN: integer addition overflow"; + case ubsan_sub_overflow: + return "UBSAN: integer subtraction overflow"; + case ubsan_mul_overflow: + return "UBSAN: integer multiplication overflow"; +#endif default: return "UBSAN: unrecognized failure code"; } @@ -204,8 +218,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason) { current->in_ubsan++; - pr_err("========================================" - "========================================\n"); + pr_warn(CUT_HERE); + pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name, loc->line & LINE_MASK, loc->column & COLUMN_MASK); @@ -215,14 +229,81 @@ static void ubsan_prologue(struct source_location *loc, const char *reason) static void ubsan_epilogue(void) { dump_stack(); - pr_err("========================================" - "========================================\n"); + pr_warn("---[ end trace ]---\n"); current->in_ubsan--; check_panic_on_warn("UBSAN"); } +static void handle_overflow(struct overflow_data *data, void *lhs, + void *rhs, char op) +{ + + struct type_descriptor *type = data->type; + char lhs_val_str[VALUE_LENGTH]; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, type_is_signed(type) ? + "signed-integer-overflow" : + "unsigned-integer-overflow"); + + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); + pr_err("%s %c %s cannot be represented in type %s\n", + lhs_val_str, + op, + rhs_val_str, + type->type_name); + + ubsan_epilogue(); +} + +void __ubsan_handle_add_overflow(void *data, + void *lhs, void *rhs) +{ + + handle_overflow(data, lhs, rhs, '+'); +} +EXPORT_SYMBOL(__ubsan_handle_add_overflow); + +void __ubsan_handle_sub_overflow(void *data, + void *lhs, void *rhs) +{ + handle_overflow(data, lhs, rhs, '-'); +} +EXPORT_SYMBOL(__ubsan_handle_sub_overflow); + +void __ubsan_handle_mul_overflow(void *data, + void *lhs, void *rhs) +{ + handle_overflow(data, lhs, rhs, '*'); +} +EXPORT_SYMBOL(__ubsan_handle_mul_overflow); + +void __ubsan_handle_negate_overflow(void *_data, void *old_val) +{ + struct overflow_data *data = _data; + char old_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, "negation-overflow"); + + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); + + pr_err("negation of %s cannot be represented in type %s:\n", + old_val_str, data->type->type_name); + + ubsan_epilogue(); +} +EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + + void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs) { struct overflow_data *data = _data; diff --git a/lib/ubsan.h b/lib/ubsan.h index 5d99ab81913b..07e37d4429b4 100644 --- a/lib/ubsan.h +++ b/lib/ubsan.h @@ -43,7 +43,7 @@ enum { struct type_descriptor { u16 type_kind; u16 type_info; - char type_name[1]; + char type_name[]; }; struct source_location { @@ -124,15 +124,32 @@ typedef s64 s_max; typedef u64 u_max; #endif -void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs); -void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr); -void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr); -void __ubsan_handle_out_of_bounds(void *_data, void *index); -void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs); -void __ubsan_handle_builtin_unreachable(void *_data); -void __ubsan_handle_load_invalid_value(void *_data, void *val); -void 
__ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, - unsigned long align, - unsigned long offset); +/* + * When generating Runtime Calls, Clang doesn't respect the -mregparm=3 + * option used on i386: https://github.com/llvm/llvm-project/issues/89670 + * Fix this for earlier Clang versions by forcing the calling convention + * to use non-register arguments. + */ +#if defined(CONFIG_X86_32) && \ + defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000 +# define ubsan_linkage asmlinkage +#else +# define ubsan_linkage +#endif + +void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val); +void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr); +void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr); +void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index); +void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data); +void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val); +void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, + unsigned long align, + unsigned long offset); #endif diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig index d883ac299508..c46c2300517c 100644 --- a/lib/vdso/Kconfig +++ b/lib/vdso/Kconfig @@ -30,4 +30,11 @@ config GENERIC_VDSO_TIME_NS Selected by architectures which support time namespaces in the VDSO +config GENERIC_VDSO_OVERFLOW_PROTECT + bool + help + Select to add multiplication overflow protection to the VDSO + time getter functions for the price of an extra conditional + in the hotpath. + endif diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index ce2f69552003..899850bd6f0b 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -5,15 +5,23 @@ #include <vdso/datapage.h> #include <vdso/helpers.h> -#ifndef vdso_calc_delta -/* - * Default implementation which works for all sane clocksources. That - * obviously excludes x86/TSC. - */ -static __always_inline -u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +#ifndef vdso_calc_ns + +#ifdef VDSO_DELTA_NOMASK +# define VDSO_DELTA_MASK(vd) ULLONG_MAX +#else +# define VDSO_DELTA_MASK(vd) (vd->mask) +#endif + +#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT +static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta) +{ + return delta < vd->max_cycles; +} +#else +static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta) { - return ((cycles - last) & mask) * mult; + return true; } #endif @@ -24,6 +32,21 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift) } #endif +/* + * Default implementation which works for all sane clocksources. That + * obviously excludes x86/TSC. 
+ */ +static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base) +{ + u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd); + + if (likely(vdso_delta_ok(vd, delta))) + return vdso_shift_ns((delta * vd->mult) + base, vd->shift); + + return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift); +} +#endif /* vdso_calc_ns */ + #ifndef __arch_vdso_hres_capable static inline bool __arch_vdso_hres_capable(void) { @@ -49,10 +72,10 @@ static inline bool vdso_cycles_ok(u64 cycles) static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, struct __kernel_timespec *ts) { - const struct vdso_data *vd; const struct timens_offset *offs = &vdns->offset[clk]; const struct vdso_timestamp *vdso_ts; - u64 cycles, last, ns; + const struct vdso_data *vd; + u64 cycles, ns; u32 seq; s64 sec; @@ -73,10 +96,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_ cycles = __arch_get_hw_counter(vd->clock_mode, vd); if (unlikely(!vdso_cycles_ok(cycles))) return -1; - ns = vdso_ts->nsec; - last = vd->cycle_last; - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); - ns = vdso_shift_ns(ns, vd->shift); + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec); sec = vdso_ts->sec; } while (unlikely(vdso_read_retry(vd, seq))); @@ -111,7 +131,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, struct __kernel_timespec *ts) { const struct vdso_timestamp *vdso_ts = &vd->basetime[clk]; - u64 cycles, last, sec, ns; + u64 cycles, sec, ns; u32 seq; /* Allows to compile the high resolution parts out */ @@ -144,10 +164,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, cycles = __arch_get_hw_counter(vd->clock_mode, vd); if (unlikely(!vdso_cycles_ok(cycles))) return -1; - ns = vdso_ts->nsec; - last = vd->cycle_last; - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); - ns = vdso_shift_ns(ns, vd->shift); + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec); sec = vdso_ts->sec; } while (unlikely(vdso_read_retry(vd, seq))); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index afb88b24fa74..552738f14275 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -60,7 +60,8 @@ bool no_hash_pointers __ro_after_init; EXPORT_SYMBOL_GPL(no_hash_pointers); -static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base) +noinline +static unsigned long long simple_strntoull(const char *startp, char **endp, unsigned int base, size_t max_chars) { const char *cp; unsigned long long result = 0ULL; @@ -95,7 +96,7 @@ static noinline unsigned long long simple_strntoull(const char *startp, size_t m noinline unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) { - return simple_strntoull(cp, INT_MAX, endp, base); + return simple_strntoull(cp, endp, base, INT_MAX); } EXPORT_SYMBOL(simple_strtoull); @@ -130,8 +131,8 @@ long simple_strtol(const char *cp, char **endp, unsigned int base) } EXPORT_SYMBOL(simple_strtol); -static long long simple_strntoll(const char *cp, size_t max_chars, char **endp, - unsigned int base) +noinline +static long long simple_strntoll(const char *cp, char **endp, unsigned int base, size_t max_chars) { /* * simple_strntoull() safely handles receiving max_chars==0 in the @@ -140,9 +141,9 @@ static long long simple_strntoll(const char *cp, size_t max_chars, char **endp, * and the content of *cp is irrelevant. 
*/ if (*cp == '-' && max_chars > 0) - return -simple_strntoull(cp + 1, max_chars - 1, endp, base); + return -simple_strntoull(cp + 1, endp, base, max_chars - 1); - return simple_strntoull(cp, max_chars, endp, base); + return simple_strntoull(cp, endp, base, max_chars); } /** @@ -155,7 +156,7 @@ static long long simple_strntoll(const char *cp, size_t max_chars, char **endp, */ long long simple_strtoll(const char *cp, char **endp, unsigned int base) { - return simple_strntoll(cp, INT_MAX, endp, base); + return simple_strntoll(cp, endp, base, INT_MAX); } EXPORT_SYMBOL(simple_strtoll); @@ -2110,15 +2111,20 @@ char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf, /* Loop starting from the root node to the current node. */ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) { - struct fwnode_handle *__fwnode = - fwnode_get_nth_parent(fwnode, depth); + /* + * Only get a reference for other nodes (i.e. parent nodes). + * fwnode refcount may be 0 here. + */ + struct fwnode_handle *__fwnode = depth ? + fwnode_get_nth_parent(fwnode, depth) : fwnode; buf = string(buf, end, fwnode_get_name_prefix(__fwnode), default_str_spec); buf = string(buf, end, fwnode_get_name(__fwnode), default_str_spec); - fwnode_handle_put(__fwnode); + if (depth) + fwnode_handle_put(__fwnode); } return buf; @@ -3648,13 +3654,11 @@ int vsscanf(const char *buf, const char *fmt, va_list args) break; if (is_sign) - val.s = simple_strntoll(str, - field_width >= 0 ? field_width : INT_MAX, - &next, base); + val.s = simple_strntoll(str, &next, base, + field_width >= 0 ? field_width : INT_MAX); else - val.u = simple_strntoull(str, - field_width >= 0 ? field_width : INT_MAX, - &next, base); + val.u = simple_strntoull(str, &next, base, + field_width >= 0 ? field_width : INT_MAX); switch (qualifier) { case 'H': /* that's 'hh' in format */ diff --git a/lib/xarray.c b/lib/xarray.c index 39f07bfc4dcc..5e7d6334d70d 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -969,8 +969,22 @@ static unsigned int node_get_marks(struct xa_node *node, unsigned int offset) return marks; } +static inline void node_mark_slots(struct xa_node *node, unsigned int sibs, + xa_mark_t mark) +{ + int i; + + if (sibs == 0) + node_mark_all(node, mark); + else { + for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1) + node_set_mark(node, i, mark); + } +} + static void node_set_marks(struct xa_node *node, unsigned int offset, - struct xa_node *child, unsigned int marks) + struct xa_node *child, unsigned int sibs, + unsigned int marks) { xa_mark_t mark = XA_MARK_0; @@ -978,7 +992,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset, if (marks & (1 << (__force unsigned int)mark)) { node_set_mark(node, offset, mark); if (child) - node_mark_all(child, mark); + node_mark_slots(child, sibs, mark); } if (mark == XA_MARK_MAX) break; @@ -1077,7 +1091,8 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) child->nr_values = xa_is_value(entry) ? 
XA_CHUNK_SIZE : 0; RCU_INIT_POINTER(child->parent, node); - node_set_marks(node, offset, child, marks); + node_set_marks(node, offset, child, xas->xa_sibs, + marks); rcu_assign_pointer(node->slots[offset], xa_mk_node(child)); if (xa_is_value(curr)) @@ -1086,7 +1101,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) } else { unsigned int canon = offset - xas->xa_sibs; - node_set_marks(node, canon, NULL, marks); + node_set_marks(node, canon, NULL, 0, marks); rcu_assign_pointer(node->slots[canon], entry); while (offset > canon) rcu_assign_pointer(node->slots[offset--], diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c index a0d06095be83..8dcb8ca39767 100644 --- a/lib/zstd/common/fse_decompress.c +++ b/lib/zstd/common/fse_decompress.c @@ -312,7 +312,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size typedef struct { short ncount[FSE_MAX_SYMBOL_VALUE + 1]; - FSE_DTable dtable[1]; /* Dynamically sized */ + FSE_DTable dtable[]; /* Dynamically sized */ } FSE_DecompressWksp; |