Diffstat (limited to 'lib')
56 files changed, 2240 insertions, 1310 deletions
diff --git a/lib/.gitignore b/lib/.gitignore index e5e217b8307b..54596b634ecb 100644 --- a/lib/.gitignore +++ b/lib/.gitignore @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only /crc32table.h /crc64table.h +/default.bconf /gen_crc32table /gen_crc64table /oid_registry_data.c diff --git a/lib/Kconfig b/lib/Kconfig index 6a843639814f..eaaad4d85bf2 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -120,6 +120,9 @@ config INDIRECT_IOMEM_FALLBACK source "lib/crypto/Kconfig" +config LIB_MEMNEQ + bool + config CRC_CCITT tristate "CRC-CCITT functions" help diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a30d5279efda..35cd8287642a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -498,7 +498,7 @@ config STACK_VALIDATION runtime stack traces are more reliable. For more information, see - tools/objtool/Documentation/stack-validation.txt. + tools/objtool/Documentation/objtool.txt. config NOINSTR_VALIDATION bool @@ -699,40 +699,6 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT help Debug objects boot parameter default value -config DEBUG_SLAB - bool "Debug slab memory allocations" - depends on DEBUG_KERNEL && SLAB - help - Say Y here to have the kernel do limited verification on memory - allocation as well as poisoning memory on free to catch use of freed - memory. This can make kmalloc/kfree-intensive workloads much slower. - -config SLUB_DEBUG_ON - bool "SLUB debugging on by default" - depends on SLUB && SLUB_DEBUG - default n - help - Boot with debugging on by default. SLUB boots by default with - the runtime debug capabilities switched off. Enabling this is - equivalent to specifying the "slub_debug" parameter on boot. - There is no support for more fine grained debug control like - possible with slub_debug=xxx. SLUB debugging may be switched - off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying - "slub_debug=-". - -config SLUB_STATS - default n - bool "Enable SLUB performance statistics" - depends on SLUB && SYSFS - help - SLUB statistics are useful to debug SLUBs allocation behavior in - order find ways to optimize the allocator. This should never be - enabled for production use since keeping statistics slows down - the allocator by a few percentage points. The slabinfo command - supports the determination of the most active slabs to figure - out which slabs are relevant to a particular load. - Try running: slabinfo -DA - config HAVE_DEBUG_KMEMLEAK bool @@ -1072,13 +1038,6 @@ config BOOTPARAM_SOFTLOCKUP_PANIC Say N if unsure. -config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE - int - depends on SOFTLOCKUP_DETECTOR - range 0 1 - default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC - default 1 if BOOTPARAM_SOFTLOCKUP_PANIC - config HARDLOCKUP_DETECTOR_PERF bool select SOFTLOCKUP_DETECTOR @@ -1120,13 +1079,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC Say N if unsure. -config BOOTPARAM_HARDLOCKUP_PANIC_VALUE - int - depends on HARDLOCKUP_DETECTOR - range 0 1 - default 0 if !BOOTPARAM_HARDLOCKUP_PANIC - default 1 if BOOTPARAM_HARDLOCKUP_PANIC - config DETECT_HUNG_TASK bool "Detect Hung Tasks" depends on DEBUG_KERNEL @@ -1174,13 +1126,6 @@ config BOOTPARAM_HUNG_TASK_PANIC Say N if unsure. -config BOOTPARAM_HUNG_TASK_PANIC_VALUE - int - depends on DETECT_HUNG_TASK - range 0 1 - default 0 if !BOOTPARAM_HUNG_TASK_PANIC - default 1 if BOOTPARAM_HUNG_TASK_PANIC - config WQ_WATCHDOG bool "Detect Workqueue Stalls" depends on DEBUG_KERNEL @@ -1545,29 +1490,6 @@ config CSD_LOCK_WAIT_DEBUG include the IPI handler function currently executing (if any) and relevant stack traces. 
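The three BOOTPARAM_*_PANIC_VALUE symbols dropped above existed only to translate a bool choice into a 0/1 default. As a hedged sketch of why they can go away, the consuming C code can derive that default straight from the bool symbol; this mirrors how the watchdog/hung-task knobs can be initialized, but is shown here purely as an illustration, not as code from this patch:

	#include <linux/cache.h>
	#include <linux/kconfig.h>

	/* Sketch: the 0/1 boot-time default comes straight from the bool option,
	 * so no separate CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE is needed. */
	unsigned int __read_mostly softlockup_panic =
				IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);
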
-choice - prompt "Lock debugging: prove subsystem device_lock() correctness" - depends on PROVE_LOCKING - help - For subsystems that have instrumented their usage of the device_lock() - with nested annotations, enable lock dependency checking. The locking - hierarchy 'subclass' identifiers are not compatible across - sub-systems, so only one can be enabled at a time. - -config PROVE_NVDIMM_LOCKING - bool "NVDIMM" - depends on LIBNVDIMM - help - Enable lockdep to validate nd_device_lock() usage. - -config PROVE_CXL_LOCKING - bool "CXL" - depends on CXL_BUS - help - Enable lockdep to validate cxl_device_lock() usage. - -endchoice - endmenu # lock debugging config TRACE_IRQFLAGS @@ -1638,7 +1560,7 @@ config DEBUG_KOBJECT_RELEASE help kobjects are reference counted objects. This means that their last reference count put is not predictable, and the kobject can - live on past the point at which a driver decides to drop it's + live on past the point at which a driver decides to drop its initial reference to the kobject gained on allocation. An example of this would be a struct device which has just been unregistered. @@ -2141,10 +2063,11 @@ config TEST_DIV64 If unsure, say N. config KPROBES_SANITY_TEST - tristate "Kprobes sanity tests" + tristate "Kprobes sanity tests" if !KUNIT_ALL_TESTS depends on DEBUG_KERNEL depends on KPROBES depends on KUNIT + default KUNIT_ALL_TESTS help This option provides for testing basic kprobes functionality on boot. Samples of kprobe and kretprobe are inserted and @@ -2418,8 +2341,9 @@ config TEST_SYSCTL If unsure, say N. config BITFIELD_KUNIT - tristate "KUnit test bitfield functions at runtime" + tristate "KUnit test bitfield functions at runtime" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help Enable this option to test the bitfield functions at boot. @@ -2453,8 +2377,9 @@ config HASH_KUNIT_TEST optimized versions. If unsure, say N. config RESOURCE_KUNIT_TEST - tristate "KUnit test for resource API" + tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the resource API unit test. Tests the logic of API provided by resource.c and ioport.h. @@ -2507,8 +2432,9 @@ config LINEAR_RANGES_TEST If unsure, say N. config CMDLINE_KUNIT_TEST - tristate "KUnit test for cmdline API" + tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the cmdline API unit test. Tests the logic of API provided by cmdline.c. @@ -2518,8 +2444,9 @@ config CMDLINE_KUNIT_TEST If unsure, say N. config BITS_TEST - tristate "KUnit test for bits.h" + tristate "KUnit test for bits.h" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the bits unit test. Tests the logic of macros defined in bits.h. diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 1f3e620188a2..f0973da583e0 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only + # This config refers to the generic KASAN mode. config HAVE_ARCH_KASAN bool @@ -15,9 +16,8 @@ config HAVE_ARCH_KASAN_VMALLOC config ARCH_DISABLE_KASAN_INLINE bool help - An architecture might not support inline instrumentation. - When this option is selected, inline and stack instrumentation are - disabled. + Disables both inline and stack instrumentation. Selected by + architectures that do not support these instrumentation types. 
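Several of the test entries above adopt the "tristate ... if !KUNIT_ALL_TESTS" plus "default KUNIT_ALL_TESTS" pattern, so enabling CONFIG_KUNIT_ALL_TESTS pulls them in automatically. For orientation, this is the general shape of suite such an option builds; the suite and case names below are invented for illustration and are not taken from the patch:

	#include <kunit/test.h>

	/* Hypothetical minimal KUnit suite of the kind a *_KUNIT_TEST option
	 * builds; names are made up for illustration. */
	static void example_math_case(struct kunit *test)
	{
		KUNIT_EXPECT_EQ(test, 4, 2 + 2);
	}

	static struct kunit_case example_cases[] = {
		KUNIT_CASE(example_math_case),
		{}
	};

	static struct kunit_suite example_suite = {
		.name = "example",
		.test_cases = example_cases,
	};
	kunit_test_suites(&example_suite);
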
config CC_HAS_KASAN_GENERIC def_bool $(cc-option, -fsanitize=kernel-address) @@ -26,13 +26,13 @@ config CC_HAS_KASAN_SW_TAGS def_bool $(cc-option, -fsanitize=kernel-hwaddress) # This option is only required for software KASAN modes. -# Old GCC versions don't have proper support for no_sanitize_address. +# Old GCC versions do not have proper support for no_sanitize_address. # See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details. config CC_HAS_WORKING_NOSANITIZE_ADDRESS def_bool !CC_IS_GCC || GCC_VERSION >= 80300 menuconfig KASAN - bool "KASAN: runtime memory debugger" + bool "KASAN: dynamic memory safety error detector" depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \ CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \ @@ -40,10 +40,13 @@ menuconfig KASAN depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) select STACKDEPOT_ALWAYS_INIT help - Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, - designed to find out-of-bounds accesses and use-after-free bugs. + Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety + error detector designed to find out-of-bounds and use-after-free bugs. + See Documentation/dev-tools/kasan.rst for details. + For better error reports, also enable CONFIG_STACKTRACE. + if KASAN choice @@ -51,75 +54,71 @@ choice default KASAN_GENERIC help KASAN has three modes: - 1. generic KASAN (similar to userspace ASan, - x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC), - 2. software tag-based KASAN (arm64 only, based on software - memory tagging (similar to userspace HWASan), enabled with - CONFIG_KASAN_SW_TAGS), and - 3. hardware tag-based KASAN (arm64 only, based on hardware - memory tagging, enabled with CONFIG_KASAN_HW_TAGS). - All KASAN modes are strictly debugging features. + 1. Generic KASAN (supported by many architectures, enabled with + CONFIG_KASAN_GENERIC, similar to userspace ASan), + 2. Software Tag-Based KASAN (arm64 only, based on software memory + tagging, enabled with CONFIG_KASAN_SW_TAGS, similar to userspace + HWASan), and + 3. Hardware Tag-Based KASAN (arm64 only, based on hardware memory + tagging, enabled with CONFIG_KASAN_HW_TAGS). - For better error reports enable CONFIG_STACKTRACE. + See Documentation/dev-tools/kasan.rst for details about each mode. config KASAN_GENERIC - bool "Generic mode" + bool "Generic KASAN" depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS select SLUB_DEBUG if SLUB select CONSTRUCTORS help - Enables generic KASAN mode. + Enables Generic KASAN. - This mode is supported in both GCC and Clang. With GCC it requires - version 8.3.0 or later. Any supported Clang version is compatible, - but detection of out-of-bounds accesses for global variables is - supported only since Clang 11. + Requires GCC 8.3.0+ or Clang. - This mode consumes about 1/8th of available memory at kernel start - and introduces an overhead of ~x1.5 for the rest of the allocations. + Consumes about 1/8th of available memory at kernel start and adds an + overhead of ~50% for dynamic allocations. The performance slowdown is ~x3. - Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB - (the resulting kernel does not boot). + (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.) 
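As a concrete illustration of the two bug classes the KASAN_GENERIC help text names, here is a deliberately buggy sketch (illustrative only, not part of the patch) of the accesses a KASAN-enabled kernel reports:

	#include <linux/slab.h>

	/* Intentionally buggy, for illustration of what KASAN reports. */
	static void kasan_bug_classes_demo(void)
	{
		char *p = kmalloc(16, GFP_KERNEL);

		if (!p)
			return;
		p[16] = 'x';	/* slab-out-of-bounds write */
		kfree(p);
		p[0] = 'y';	/* use-after-free write */
	}
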
config KASAN_SW_TAGS - bool "Software tag-based mode" + bool "Software Tag-Based KASAN" depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS select SLUB_DEBUG if SLUB select CONSTRUCTORS help - Enables software tag-based KASAN mode. + Enables Software Tag-Based KASAN. - This mode require software memory tagging support in the form of - HWASan-like compiler instrumentation. + Requires GCC 11+ or Clang. - Currently this mode is only implemented for arm64 CPUs and relies on - Top Byte Ignore. This mode requires Clang. + Supported only on arm64 CPUs and relies on Top Byte Ignore. - This mode consumes about 1/16th of available memory at kernel start - and introduces an overhead of ~20% for the rest of the allocations. - This mode may potentially introduce problems relating to pointer - casting and comparison, as it embeds tags into the top byte of each - pointer. + Consumes about 1/16th of available memory at kernel start and + add an overhead of ~20% for dynamic allocations. - Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB - (the resulting kernel does not boot). + May potentially introduce problems related to pointer casting and + comparison, as it embeds a tag into the top byte of each pointer. + + (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.) config KASAN_HW_TAGS - bool "Hardware tag-based mode" + bool "Hardware Tag-Based KASAN" depends on HAVE_ARCH_KASAN_HW_TAGS depends on SLUB help - Enables hardware tag-based KASAN mode. + Enables Hardware Tag-Based KASAN. + + Requires GCC 10+ or Clang 12+. - This mode requires hardware memory tagging support, and can be used - by any architecture that provides it. + Supported only on arm64 CPUs starting from ARMv8.5 and relies on + Memory Tagging Extension and Top Byte Ignore. - Currently this mode is only implemented for arm64 CPUs starting from - ARMv8.5 and relies on Memory Tagging Extension and Top Byte Ignore. + Consumes about 1/32nd of available memory. + + May potentially introduce problems related to pointer casting and + comparison, as it embeds a tag into the top byte of each pointer. endchoice @@ -131,83 +130,80 @@ choice config KASAN_OUTLINE bool "Outline instrumentation" help - Before every memory access compiler insert function call - __asan_load*/__asan_store*. These functions performs check - of shadow memory. This is slower than inline instrumentation, - however it doesn't bloat size of kernel's .text section so - much as inline does. + Makes the compiler insert function calls that check whether the memory + is accessible before each memory access. Slower than KASAN_INLINE, but + does not bloat the size of the kernel's .text section so much. config KASAN_INLINE bool "Inline instrumentation" depends on !ARCH_DISABLE_KASAN_INLINE help - Compiler directly inserts code checking shadow memory before - memory accesses. This is faster than outline (in some workloads - it gives about x2 boost over outline instrumentation), but - make kernel's .text size much bigger. + Makes the compiler directly insert memory accessibility checks before + each memory access. Faster than KASAN_OUTLINE (gives ~x2 boost for + some workloads), but makes the kernel's .text size much bigger. 
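To make the outline vs. inline distinction above more tangible, a simplified sketch of what each mode conceptually adds around an 8-byte load; this is not real compiler output, and __asan_load8(), __asan_report_load8_noabort() and KASAN_SHADOW_OFFSET are assumed here from the generic KASAN runtime rather than defined by this patch:

	#include <linux/compiler.h>
	#include <linux/types.h>

	void __asan_load8(unsigned long addr);
	void __asan_report_load8_noabort(unsigned long addr);

	/* Outline instrumentation: each access is preceded by a runtime call. */
	static u64 load_outline(const u64 *p)
	{
		__asan_load8((unsigned long)p);
		return *p;
	}

	/* Inline instrumentation: a simplified shadow-memory check is emitted
	 * at the access site (real KASAN also handles partially used granules). */
	static u64 load_inline(const u64 *p)
	{
		s8 shadow = *(s8 *)(((unsigned long)p >> 3) + KASAN_SHADOW_OFFSET);

		if (unlikely(shadow))
			__asan_report_load8_noabort((unsigned long)p);
		return *p;
	}
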
endchoice config KASAN_STACK - bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST + bool "Stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST depends on KASAN_GENERIC || KASAN_SW_TAGS depends on !ARCH_DISABLE_KASAN_INLINE default y if CC_IS_GCC help - The LLVM stack address sanitizer has a know problem that - causes excessive stack usage in a lot of functions, see - https://bugs.llvm.org/show_bug.cgi?id=38809 - Disabling asan-stack makes it safe to run kernels build - with clang-8 with KASAN enabled, though it loses some of - the functionality. - This feature is always disabled when compile-testing with clang - to avoid cluttering the output in stack overflow warnings, - but clang users can still enable it for builds without - CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe - to use and enabled by default. - If the architecture disables inline instrumentation, stack - instrumentation is also disabled as it adds inline-style - instrumentation that is run unconditionally. + Disables stack instrumentation and thus KASAN's ability to detect + out-of-bounds bugs in stack variables. + + With Clang, stack instrumentation has a problem that causes excessive + stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus, + with Clang, this option is deemed unsafe. + + This option is always disabled when compile-testing with Clang to + avoid cluttering the log with stack overflow warnings. + + With GCC, enabling stack instrumentation is assumed to be safe. + + If the architecture disables inline instrumentation via + ARCH_DISABLE_KASAN_INLINE, stack instrumentation gets disabled + as well, as it adds inline-style instrumentation that is run + unconditionally. config KASAN_TAGS_IDENTIFY - bool "Enable memory corruption identification" + bool "Memory corruption type identification" depends on KASAN_SW_TAGS || KASAN_HW_TAGS help - This option enables best-effort identification of bug type - (use-after-free or out-of-bounds) at the cost of increased - memory consumption. + Enables best-effort identification of the bug types (use-after-free + or out-of-bounds) at the cost of increased memory consumption. + Only applicable for the tag-based KASAN modes. config KASAN_VMALLOC bool "Check accesses to vmalloc allocations" depends on HAVE_ARCH_KASAN_VMALLOC help - This mode makes KASAN check accesses to vmalloc allocations for - validity. + Makes KASAN check the validity of accesses to vmalloc allocations. - With software KASAN modes, checking is done for all types of vmalloc - allocations. Enabling this option leads to higher memory usage. + With software KASAN modes, all types vmalloc allocations are + checked. Enabling this option leads to higher memory usage. - With hardware tag-based KASAN, only VM_ALLOC mappings are checked. - There is no additional memory usage. + With Hardware Tag-Based KASAN, only non-executable VM_ALLOC mappings + are checked. There is no additional memory usage. config KASAN_KUNIT_TEST tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS depends on KASAN && KUNIT default KUNIT_ALL_TESTS help - This is a KUnit test suite doing various nasty things like - out of bounds and use after free accesses. It is useful for testing - kernel debugging features like KASAN. + A KUnit-based KASAN test suite. Triggers different kinds of + out-of-bounds and use-after-free accesses. Useful for testing whether + KASAN can detect certain bug types. 
For more information on KUnit and unit tests in general, please refer - to the KUnit documentation in Documentation/dev-tools/kunit. + to the KUnit documentation in Documentation/dev-tools/kunit/. config KASAN_MODULE_TEST tristate "KUnit-incompatible tests of KASAN bug detection capabilities" depends on m && KASAN && !KASAN_HW_TAGS help - This is a part of the KASAN test suite that is incompatible with - KUnit. Currently includes tests that do bad copy_from/to_user - accesses. + A part of the KASAN test suite that is not integrated with KUnit. + Incompatible with Hardware Tag-Based KASAN. endif # KASAN diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index c4fe15d38b60..fd15230a703b 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -84,6 +84,9 @@ config UBSAN_SHIFT config UBSAN_DIV_ZERO bool "Perform checking for integer divide-by-zero" depends on $(cc-option,-fsanitize=integer-divide-by-zero) + # https://github.com/ClangBuiltLinux/linux/issues/1657 + # https://github.com/llvm/llvm-project/issues/56289 + depends on !CC_IS_CLANG help This option enables -fsanitize=integer-divide-by-zero which checks for integer division by zero. This is effectively redundant with the @@ -94,7 +97,7 @@ config UBSAN_UNREACHABLE bool "Perform checking for unreachable code" # objtool already handles unreachable checking and gets angry about # seeing UBSan instrumentation located in unreachable places. - depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || X86_SMAP)) + depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION)) depends on $(cc-option,-fsanitize=unreachable) help This option enables -fsanitize=unreachable which checks for control diff --git a/lib/Makefile b/lib/Makefile index 89fcae891361..67482f5ec0e8 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -29,7 +29,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ - idr.o extable.o sha1.o irq_regs.o argv_split.o \ + idr.o extable.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ @@ -251,6 +251,7 @@ obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o lib-$(CONFIG_CLZ_TAB) += clz_tab.o +lib-$(CONFIG_LIB_MEMNEQ) += memneq.o obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o @@ -281,7 +282,15 @@ $(foreach file, $(libfdt_files), \ $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt)) lib-$(CONFIG_LIBFDT) += $(libfdt_files) -lib-$(CONFIG_BOOT_CONFIG) += bootconfig.o +obj-$(CONFIG_BOOT_CONFIG) += bootconfig.o +obj-$(CONFIG_BOOT_CONFIG_EMBED) += bootconfig-data.o + +$(obj)/bootconfig-data.o: $(obj)/default.bconf + +targets += default.bconf +filechk_defbconf = cat $(or $(real-prereqs), /dev/null) +$(obj)/default.bconf: $(CONFIG_BOOT_CONFIG_EMBED_FILE) FORCE + $(call filechk,defbconf) obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 079c72e26493..ca0b4f360c1a 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -1461,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array, struct assoc_array_ptr *cursor, *ptr; struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp; unsigned long nr_leaves_on_tree; + bool retained; int keylen, slot, nr_free, next_slot, i; pr_devel("-->%s()\n", __func__); @@ -1536,6 +1537,7 @@ continue_node: goto 
descend; } +retry_compress: pr_devel("-- compress node %p --\n", new_n); /* Count up the number of empty slots in this node and work out the @@ -1553,6 +1555,7 @@ continue_node: pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch); /* See what we can fold in */ + retained = false; next_slot = 0; for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { struct assoc_array_shortcut *s; @@ -1602,9 +1605,14 @@ continue_node: pr_devel("[%d] retain node %lu/%d [nx %d]\n", slot, child->nr_leaves_on_branch, nr_free + 1, next_slot); + retained = true; } } + if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) { + pr_devel("internal nodes remain despite enough space, retrying\n"); + goto retry_compress; + } pr_devel("after: %lu\n", new_n->nr_leaves_on_branch); nr_leaves_on_tree = new_n->nr_leaves_on_branch; diff --git a/lib/bitmap.c b/lib/bitmap.c index 0d5c2ece0bcb..b18e31ea6e66 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -45,19 +45,19 @@ * for the best explanations of this ordering. */ -int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] != bitmap2[k]) - return 0; + return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; + return false; - return 1; + return true; } EXPORT_SYMBOL(__bitmap_equal); @@ -303,33 +303,33 @@ void __bitmap_replace(unsigned long *dst, } EXPORT_SYMBOL(__bitmap_replace); -int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & bitmap2[k]) - return 1; + return true; if (bits % BITS_PER_LONG) if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 1; - return 0; + return true; + return false; } EXPORT_SYMBOL(__bitmap_intersects); -int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & ~bitmap2[k]) - return 0; + return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; - return 1; + return false; + return true; } EXPORT_SYMBOL(__bitmap_subset); @@ -527,33 +527,39 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp, * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal * bitmask and decimal list to userspace by sysfs ABI. * Drivers might be using a normal attribute for this kind of ABIs. A - * normal attribute typically has show entry as below: - * static ssize_t example_attribute_show(struct device *dev, + * normal attribute typically has show entry as below:: + * + * static ssize_t example_attribute_show(struct device *dev, * struct device_attribute *attr, char *buf) - * { + * { * ... * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max); - * } + * } + * * show entry of attribute has no offset and count parameters and this * means the file is limited to one page only. 
* bitmap_print_to_pagebuf() API works terribly well for this kind of - * normal attribute with buf parameter and without offset, count: - * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, + * normal attribute with buf parameter and without offset, count:: + * + * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, * int nmaskbits) - * { - * } + * { + * } + * * The problem is once we have a large bitmap, we have a chance to get a * bitmask or list more than one page. Especially for list, it could be * as complex as 0,3,5,7,9,... We have no simple way to know it exact size. * It turns out bin_attribute is a way to break this limit. bin_attribute - * has show entry as below: - * static ssize_t - * example_bin_attribute_show(struct file *filp, struct kobject *kobj, + * has show entry as below:: + * + * static ssize_t + * example_bin_attribute_show(struct file *filp, struct kobject *kobj, * struct bin_attribute *attr, char *buf, * loff_t offset, size_t count) - * { + * { * ... - * } + * } + * * With the new offset and count parameters, this makes sysfs ABI be able * to support file size more than one page. For example, offset could be * >= 4096. @@ -577,6 +583,7 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp, * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf(). * It is intended to workaround sysfs limitations discussed above and should be * used carefully in general case for the following reasons: + * * - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf(). * - Memory complexity is O(nbits), comparing to O(1) for snprintf(). * - @off and @count are NOT offset and number of bits to print. @@ -1505,5 +1512,59 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits) buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); } EXPORT_SYMBOL(bitmap_to_arr32); +#endif + +#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) +/** + * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u64 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits) +{ + int n; + + for (n = nbits; n > 0; n -= 64) { + u64 val = *buf++; + *bitmap++ = val; + if (n > 32) + *bitmap++ = val >> 32; + } + + /* + * Clear tail bits in the last word beyond nbits. + * + * Negative index is OK because here we point to the word next + * to the last word of the bitmap, except for nbits == 0, which + * is tested implicitly. + */ + if (nbits % BITS_PER_LONG) + bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits); +} +EXPORT_SYMBOL(bitmap_from_arr64); + +/** + * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits + * @buf: array of u64 (in host byte order), the dest bitmap + * @bitmap: array of unsigned longs, the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits) +{ + const unsigned long *end = bitmap + BITS_TO_LONGS(nbits); + + while (bitmap < end) { + *buf = *bitmap++; + if (bitmap < end) + *buf |= (u64)(*bitmap++) << 32; + buf++; + } + + /* Clear tail bits in the last element of array beyond nbits. 
*/ + if (nbits % 64) + buf[-1] &= GENMASK_ULL(nbits % 64, 0); +} +EXPORT_SYMBOL(bitmap_to_arr64); #endif diff --git a/lib/bootconfig-data.S b/lib/bootconfig-data.S new file mode 100644 index 000000000000..ef85ba1a82f4 --- /dev/null +++ b/lib/bootconfig-data.S @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Embed default bootconfig in the kernel. + */ + .section .init.rodata, "aw" + .global embedded_bootconfig_data +embedded_bootconfig_data: + .incbin "lib/default.bconf" + .global embedded_bootconfig_data_end +embedded_bootconfig_data_end: diff --git a/lib/bootconfig.c b/lib/bootconfig.c index 74f3201ab8e5..c59d26068a64 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -12,6 +12,19 @@ #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/string.h> + +#ifdef CONFIG_BOOT_CONFIG_EMBED +/* embedded_bootconfig_data is defined in bootconfig-data.S */ +extern __visible const char embedded_bootconfig_data[]; +extern __visible const char embedded_bootconfig_data_end[]; + +const char * __init xbc_get_embedded_bootconfig(size_t *size) +{ + *size = embedded_bootconfig_data_end - embedded_bootconfig_data; + return (*size) ? embedded_bootconfig_data : NULL; +} +#endif + #else /* !__KERNEL__ */ /* * NOTE: This is only for tools/bootconfig, because tools/bootconfig will diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c index 1974b355c148..1d26a1647da5 100644 --- a/lib/crc-itu-t.c +++ b/lib/crc-itu-t.c @@ -7,7 +7,7 @@ #include <linux/module.h> #include <linux/crc-itu-t.h> -/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */ +/* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */ const u16 crc_itu_t_table[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 379a66d7f504..9ff549f63540 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519 tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n + select LIB_MEMNEQ help Enable the Curve25519 library interface. 
This interface may be fulfilled by either the generic implementation or an arch-specific @@ -120,13 +121,10 @@ config CRYPTO_LIB_CHACHA20POLY1305 select CRYPTO_LIB_POLY1305 select CRYPTO_ALGAPI -config CRYPTO_LIB_SHA256 - tristate - -config CRYPTO_LIB_SM3 +config CRYPTO_LIB_SHA1 tristate -config CRYPTO_LIB_SM4 +config CRYPTO_LIB_SHA256 tristate endmenu diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 6c872d05d1e6..919cbb2c220d 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -34,15 +34,12 @@ libpoly1305-y := poly1305-donna32.o libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o libpoly1305-y += poly1305.o +obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o +libsha1-y := sha1.o + obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o -obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o -libsm3-y := sm3.o - -obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o -libsm4-y := sm4.o - ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) libblake2s-y += blake2s-selftest.o libchacha20poly1305-y += chacha20poly1305-selftest.o diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c index 409e4b728770..7d77dea15587 100644 --- a/lib/crypto/blake2s-selftest.c +++ b/lib/crypto/blake2s-selftest.c @@ -4,6 +4,8 @@ */ #include <crypto/internal/blake2s.h> +#include <linux/kernel.h> +#include <linux/random.h> #include <linux/string.h> /* @@ -587,5 +589,44 @@ bool __init blake2s_selftest(void) } } + for (i = 0; i < 32; ++i) { + enum { TEST_ALIGNMENT = 16 }; + u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1] + __aligned(TEST_ALIGNMENT); + u8 blocks[BLAKE2S_BLOCK_SIZE * 2]; + struct blake2s_state state1, state2; + + get_random_bytes(blocks, sizeof(blocks)); + get_random_bytes(&state, sizeof(state)); + +#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \ + defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S) + memcpy(&state1, &state, sizeof(state1)); + memcpy(&state2, &state, sizeof(state2)); + blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE); + blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE); + if (memcmp(&state1, &state2, sizeof(state1))) { + pr_err("blake2s random compress self-test %d: FAIL\n", + i + 1); + success = false; + } +#endif + + memcpy(&state1, &state, sizeof(state1)); + blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE); + for (l = 1; l < TEST_ALIGNMENT; ++l) { + memcpy(unaligned_block + l, blocks, + BLAKE2S_BLOCK_SIZE); + memcpy(&state2, &state, sizeof(state2)); + blake2s_compress(&state2, unaligned_block + l, 1, + BLAKE2S_BLOCK_SIZE); + if (memcmp(&state1, &state2, sizeof(state1))) { + pr_err("blake2s random compress align %d self-test %d: FAIL\n", + l, i + 1); + success = false; + } + } + } + return success; } diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index c71c09621c09..98e688c6d891 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -16,16 +16,44 @@ #include <linux/init.h> #include <linux/bug.h> +static inline void blake2s_set_lastblock(struct blake2s_state *state) +{ + state->f[0] = -1; +} + void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) { - __blake2s_update(state, in, inlen, false); + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2S_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + 
blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; } EXPORT_SYMBOL(blake2s_update); void blake2s_final(struct blake2s_state *state, u8 *out) { WARN_ON(IS_ENABLED(DEBUG) && !out); - __blake2s_final(state, out, false); + blake2s_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ + blake2s_compress(state, state->buf, 1, state->buflen); + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); + memcpy(out, state->h, state->outlen); memzero_explicit(state, sizeof(*state)); } EXPORT_SYMBOL(blake2s_final); @@ -38,12 +66,7 @@ static int __init blake2s_mod_init(void) return 0; } -static void __exit blake2s_mod_exit(void) -{ -} - module_init(blake2s_mod_init); -module_exit(blake2s_mod_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("BLAKE2s hash function"); MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/lib/sha1.c b/lib/crypto/sha1.c index 0494766fc574..1aebe7be9401 100644 --- a/lib/sha1.c +++ b/lib/crypto/sha1.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/export.h> +#include <linux/module.h> #include <linux/bitops.h> #include <linux/string.h> #include <crypto/sha1.h> @@ -135,3 +136,5 @@ void sha1_init(__u32 *buf) buf[4] = 0xc3d2e1f0; } EXPORT_SYMBOL(sha1_init); + +MODULE_LICENSE("GPL"); diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c deleted file mode 100644 index d473e358a873..000000000000 --- a/lib/crypto/sm3.c +++ /dev/null @@ -1,246 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described - * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 - * - * Copyright (C) 2017 ARM Limited or its affiliates. - * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com> - * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> - */ - -#include <linux/module.h> -#include <asm/unaligned.h> -#include <crypto/sm3.h> - -static const u32 ____cacheline_aligned K[64] = { - 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, - 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc, - 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce, - 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6, - 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, - 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, - 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, - 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5, - 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53, - 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d, - 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4, - 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43, - 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, - 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, - 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, - 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 -}; - -/* - * Transform the message X which consists of 16 32-bit-words. See - * GM/T 004-2012 for details. 
- */ -#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \ - do { \ - ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \ - ss2 = ss1 ^ rol32((a), 12); \ - d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \ - h += GG ## i(e, f, g) + ss1 + (w1); \ - b = rol32((b), 9); \ - f = rol32((f), 19); \ - h = P0((h)); \ - } while (0) - -#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ - R(1, a, b, c, d, e, f, g, h, t, w1, w2) -#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ - R(2, a, b, c, d, e, f, g, h, t, w1, w2) - -#define FF1(x, y, z) (x ^ y ^ z) -#define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) - -#define GG1(x, y, z) FF1(x, y, z) -#define GG2(x, y, z) ((x & y) | (~x & z)) - -/* Message expansion */ -#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) -#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23)) -#define I(i) (W[i] = get_unaligned_be32(data + i * 4)) -#define W1(i) (W[i & 0x0f]) -#define W2(i) (W[i & 0x0f] = \ - P1(W[i & 0x0f] \ - ^ W[(i-9) & 0x0f] \ - ^ rol32(W[(i-3) & 0x0f], 15)) \ - ^ rol32(W[(i-13) & 0x0f], 7) \ - ^ W[(i-6) & 0x0f]) - -static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) -{ - u32 a, b, c, d, e, f, g, h, ss1, ss2; - - a = sctx->state[0]; - b = sctx->state[1]; - c = sctx->state[2]; - d = sctx->state[3]; - e = sctx->state[4]; - f = sctx->state[5]; - g = sctx->state[6]; - h = sctx->state[7]; - - R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); - R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); - R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); - R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); - R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); - R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); - R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); - R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); - R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); - R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); - R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); - R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); - R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); - R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17)); - R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); - R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); - - R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); - R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); - R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); - R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); - R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); - R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); - R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); - R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); - R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); - R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); - R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); - R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); - R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); - R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); - R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); - R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); - - R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); - R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); - R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); - R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); - R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); - R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); - R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); - R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); - R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); - R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45)); - R2(c, d, a, 
b, g, h, e, f, K[42], W1(42), W2(46)); - R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); - R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); - R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49)); - R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); - R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); - - R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); - R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); - R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); - R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); - R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); - R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); - R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); - R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); - R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); - R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); - R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); - R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); - R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); - R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); - R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); - R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); - - sctx->state[0] ^= a; - sctx->state[1] ^= b; - sctx->state[2] ^= c; - sctx->state[3] ^= d; - sctx->state[4] ^= e; - sctx->state[5] ^= f; - sctx->state[6] ^= g; - sctx->state[7] ^= h; -} -#undef R -#undef R1 -#undef R2 -#undef I -#undef W1 -#undef W2 - -static inline void sm3_block(struct sm3_state *sctx, - u8 const *data, int blocks, u32 W[16]) -{ - while (blocks--) { - sm3_transform(sctx, data, W); - data += SM3_BLOCK_SIZE; - } -} - -void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len) -{ - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - u32 W[16]; - - sctx->count += len; - - if ((partial + len) >= SM3_BLOCK_SIZE) { - int blocks; - - if (partial) { - int p = SM3_BLOCK_SIZE - partial; - - memcpy(sctx->buffer + partial, data, p); - data += p; - len -= p; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - blocks = len / SM3_BLOCK_SIZE; - len %= SM3_BLOCK_SIZE; - - if (blocks) { - sm3_block(sctx, data, blocks, W); - data += blocks * SM3_BLOCK_SIZE; - } - - memzero_explicit(W, sizeof(W)); - - partial = 0; - } - if (len) - memcpy(sctx->buffer + partial, data, len); -} -EXPORT_SYMBOL_GPL(sm3_update); - -void sm3_final(struct sm3_state *sctx, u8 *out) -{ - const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64); - __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); - __be32 *digest = (__be32 *)out; - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - u32 W[16]; - int i; - - sctx->buffer[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial); - partial = 0; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - memset(sctx->buffer + partial, 0, bit_offset - partial); - *bits = cpu_to_be64(sctx->count << 3); - sm3_block(sctx, sctx->buffer, 1, W); - - for (i = 0; i < 8; i++) - put_unaligned_be32(sctx->state[i], digest++); - - /* Zeroize sensitive information. 
*/ - memzero_explicit(W, sizeof(W)); - memzero_explicit(sctx, sizeof(*sctx)); -} -EXPORT_SYMBOL_GPL(sm3_final); - -MODULE_DESCRIPTION("Generic SM3 library"); -MODULE_LICENSE("GPL v2"); diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c deleted file mode 100644 index 284e62576d0c..000000000000 --- a/lib/crypto/sm4.c +++ /dev/null @@ -1,176 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * SM4, as specified in - * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html - * - * Copyright (C) 2018 ARM Limited or its affiliates. - * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> - */ - -#include <linux/module.h> -#include <asm/unaligned.h> -#include <crypto/sm4.h> - -static const u32 fk[4] = { - 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc -}; - -static const u32 ____cacheline_aligned ck[32] = { - 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, - 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9, - 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, - 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, - 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, - 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299, - 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, - 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279 -}; - -static const u8 ____cacheline_aligned sbox[256] = { - 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, - 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, - 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, - 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, - 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, - 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, - 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, - 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, - 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, - 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, - 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, - 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, - 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, - 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, - 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, - 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, - 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, - 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, - 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, - 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, - 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, - 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, - 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, - 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, - 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, - 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, - 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, - 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, - 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, - 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, - 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, - 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 -}; - -static inline u32 sm4_t_non_lin_sub(u32 x) -{ - u32 out; - - out = (u32)sbox[x & 0xff]; - out |= (u32)sbox[(x >> 8) & 0xff] << 8; - out |= (u32)sbox[(x >> 16) & 0xff] << 16; - out |= (u32)sbox[(x >> 24) & 0xff] << 24; - - return out; -} - -static inline u32 sm4_key_lin_sub(u32 x) -{ - return x ^ rol32(x, 13) ^ rol32(x, 23); -} - -static inline u32 sm4_enc_lin_sub(u32 x) -{ - return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24); -} - -static inline u32 sm4_key_sub(u32 x) -{ - return sm4_key_lin_sub(sm4_t_non_lin_sub(x)); -} - -static inline u32 sm4_enc_sub(u32 x) -{ - return sm4_enc_lin_sub(sm4_t_non_lin_sub(x)); -} - -static 
inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk) -{ - return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk); -} - - -/** - * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016 - * @ctx: The location where the computed key will be stored. - * @in_key: The supplied key. - * @key_len: The length of the supplied key. - * - * Returns 0 on success. The function fails only if an invalid key size (or - * pointer) is supplied. - */ -int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key, - unsigned int key_len) -{ - u32 rk[4]; - const u32 *key = (u32 *)in_key; - int i; - - if (key_len != SM4_KEY_SIZE) - return -EINVAL; - - rk[0] = get_unaligned_be32(&key[0]) ^ fk[0]; - rk[1] = get_unaligned_be32(&key[1]) ^ fk[1]; - rk[2] = get_unaligned_be32(&key[2]) ^ fk[2]; - rk[3] = get_unaligned_be32(&key[3]) ^ fk[3]; - - for (i = 0; i < 32; i += 4) { - rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]); - rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]); - rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]); - rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]); - - ctx->rkey_enc[i + 0] = rk[0]; - ctx->rkey_enc[i + 1] = rk[1]; - ctx->rkey_enc[i + 2] = rk[2]; - ctx->rkey_enc[i + 3] = rk[3]; - ctx->rkey_dec[31 - 0 - i] = rk[0]; - ctx->rkey_dec[31 - 1 - i] = rk[1]; - ctx->rkey_dec[31 - 2 - i] = rk[2]; - ctx->rkey_dec[31 - 3 - i] = rk[3]; - } - - return 0; -} -EXPORT_SYMBOL_GPL(sm4_expandkey); - -/** - * sm4_crypt_block - Encrypt or decrypt a single SM4 block - * @rk: The rkey_enc for encrypt or rkey_dec for decrypt - * @out: Buffer to store output data - * @in: Buffer containing the input data - */ -void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in) -{ - u32 x[4], i; - - x[0] = get_unaligned_be32(in + 0 * 4); - x[1] = get_unaligned_be32(in + 1 * 4); - x[2] = get_unaligned_be32(in + 2 * 4); - x[3] = get_unaligned_be32(in + 3 * 4); - - for (i = 0; i < 32; i += 4) { - x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]); - x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]); - x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]); - x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]); - } - - put_unaligned_be32(x[3 - 0], out + 0 * 4); - put_unaligned_be32(x[3 - 1], out + 1 * 4); - put_unaligned_be32(x[3 - 2], out + 2 * 4); - put_unaligned_be32(x[3 - 3], out + 3 * 4); -} -EXPORT_SYMBOL_GPL(sm4_crypt_block); - -MODULE_DESCRIPTION("Generic SM4 library"); -MODULE_LICENSE("GPL v2"); diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 6b7f1bf6715d..83471e81501a 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl) * Permit this cpu to perform nested stack dumps while serialising * against other CPUs */ - printk_cpu_lock_irqsave(flags); + printk_cpu_sync_get_irqsave(flags); __dump_stack(log_lvl); - printk_cpu_unlock_irqrestore(flags); + printk_cpu_sync_put_irqrestore(flags); } EXPORT_SYMBOL(dump_stack_lvl); diff --git a/lib/fault-inject.c b/lib/fault-inject.c index ce12621b4275..423784d9c058 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr); static void fail_dump(struct fault_attr *attr) { + if (attr->no_warn) + return; + if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) { printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" "name %pd, interval %lu, probability %lu, " diff --git a/lib/glob.c b/lib/glob.c index 85ecbda45cd8..15b73f490720 100644 --- a/lib/glob.c +++ b/lib/glob.c @@ -45,7 +45,7 @@ bool __pure 
glob_match(char const *pat, char const *str) * (no exception for /), it can be easily proved that there's * never a need to backtrack multiple levels. */ - char const *back_pat = NULL, *back_str = back_str; + char const *back_pat = NULL, *back_str; /* * Loop over each token (character or class) in pat, matching diff --git a/lib/idr.c b/lib/idr.c index f4ab4f4aa3c7..7ecdfdb5309e 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -491,7 +491,8 @@ void ida_free(struct ida *ida, unsigned int id) struct ida_bitmap *bitmap; unsigned long flags; - BUG_ON((int)id < 0); + if ((int)id < 0) + return; xas_lock_irqsave(&xas, flags); bitmap = xas_load(&xas); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 6dd5330f7a99..0e0be334dbee 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -168,174 +168,6 @@ static int copyin(void *to, const void __user *from, size_t n) return n; } -static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, - struct iov_iter *i) -{ - size_t skip, copy, left, wanted; - const struct iovec *iov; - char __user *buf; - void *kaddr, *from; - - if (unlikely(bytes > i->count)) - bytes = i->count; - - if (unlikely(!bytes)) - return 0; - - might_fault(); - wanted = bytes; - iov = i->iov; - skip = i->iov_offset; - buf = iov->iov_base + skip; - copy = min(bytes, iov->iov_len - skip); - - if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) { - kaddr = kmap_atomic(page); - from = kaddr + offset; - - /* first chunk, usually the only one */ - left = copyout(buf, from, copy); - copy -= left; - skip += copy; - from += copy; - bytes -= copy; - - while (unlikely(!left && bytes)) { - iov++; - buf = iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyout(buf, from, copy); - copy -= left; - skip = copy; - from += copy; - bytes -= copy; - } - if (likely(!bytes)) { - kunmap_atomic(kaddr); - goto done; - } - offset = from - kaddr; - buf += copy; - kunmap_atomic(kaddr); - copy = min(bytes, iov->iov_len - skip); - } - /* Too bad - revert to non-atomic kmap */ - - kaddr = kmap(page); - from = kaddr + offset; - left = copyout(buf, from, copy); - copy -= left; - skip += copy; - from += copy; - bytes -= copy; - while (unlikely(!left && bytes)) { - iov++; - buf = iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyout(buf, from, copy); - copy -= left; - skip = copy; - from += copy; - bytes -= copy; - } - kunmap(page); - -done: - if (skip == iov->iov_len) { - iov++; - skip = 0; - } - i->count -= wanted - bytes; - i->nr_segs -= iov - i->iov; - i->iov = iov; - i->iov_offset = skip; - return wanted - bytes; -} - -static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, - struct iov_iter *i) -{ - size_t skip, copy, left, wanted; - const struct iovec *iov; - char __user *buf; - void *kaddr, *to; - - if (unlikely(bytes > i->count)) - bytes = i->count; - - if (unlikely(!bytes)) - return 0; - - might_fault(); - wanted = bytes; - iov = i->iov; - skip = i->iov_offset; - buf = iov->iov_base + skip; - copy = min(bytes, iov->iov_len - skip); - - if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) { - kaddr = kmap_atomic(page); - to = kaddr + offset; - - /* first chunk, usually the only one */ - left = copyin(to, buf, copy); - copy -= left; - skip += copy; - to += copy; - bytes -= copy; - - while (unlikely(!left && bytes)) { - iov++; - buf = iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyin(to, buf, copy); - copy -= left; - skip = copy; - to += copy; - bytes -= copy; - } - if (likely(!bytes)) { - 
kunmap_atomic(kaddr); - goto done; - } - offset = to - kaddr; - buf += copy; - kunmap_atomic(kaddr); - copy = min(bytes, iov->iov_len - skip); - } - /* Too bad - revert to non-atomic kmap */ - - kaddr = kmap(page); - to = kaddr + offset; - left = copyin(to, buf, copy); - copy -= left; - skip += copy; - to += copy; - bytes -= copy; - while (unlikely(!left && bytes)) { - iov++; - buf = iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyin(to, buf, copy); - copy -= left; - skip = copy; - to += copy; - bytes -= copy; - } - kunmap(page); - -done: - if (skip == iov->iov_len) { - iov++; - skip = 0; - } - i->count -= wanted - bytes; - i->nr_segs -= iov - i->iov; - i->iov = iov; - i->iov_offset = skip; - return wanted - bytes; -} - #ifdef PIPE_PARANOIA static bool sanity(const struct iov_iter *i) { @@ -689,6 +521,7 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, struct pipe_inode_info *pipe = i->pipe; unsigned int p_mask = pipe->ring_size - 1; unsigned int i_head; + unsigned int valid = pipe->head; size_t n, off, xfer = 0; if (!sanity(i)) @@ -702,11 +535,17 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); chunk -= rem; kunmap_local(p); - i->head = i_head; - i->iov_offset = off + chunk; - xfer += chunk; - if (rem) + if (chunk) { + i->head = i_head; + i->iov_offset = off + chunk; + xfer += chunk; + valid = i_head + 1; + } + if (rem) { + pipe->bufs[i_head & p_mask].len -= rem; + pipe_discard_from(pipe, valid); break; + } n -= chunk; off = 0; i_head++; @@ -848,24 +687,14 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { - if (likely(iter_is_iovec(i))) - return copy_page_to_iter_iovec(page, offset, bytes, i); - if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { + if (unlikely(iov_iter_is_pipe(i))) { + return copy_page_to_iter_pipe(page, offset, bytes, i); + } else { void *kaddr = kmap_local_page(page); size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); kunmap_local(kaddr); return wanted; } - if (iov_iter_is_pipe(i)) - return copy_page_to_iter_pipe(page, offset, bytes, i); - if (unlikely(iov_iter_is_discard(i))) { - if (unlikely(i->count < bytes)) - bytes = i->count; - i->count -= bytes; - return bytes; - } - WARN_ON(1); - return 0; } size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, @@ -896,17 +725,12 @@ EXPORT_SYMBOL(copy_page_to_iter); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { - if (unlikely(!page_copy_sane(page, offset, bytes))) - return 0; - if (likely(iter_is_iovec(i))) - return copy_page_from_iter_iovec(page, offset, bytes, i); - if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { + if (page_copy_sane(page, offset, bytes)) { void *kaddr = kmap_local_page(page); size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); kunmap_local(kaddr); return wanted; } - WARN_ON(1); return 0; } EXPORT_SYMBOL(copy_page_from_iter); @@ -1029,17 +853,22 @@ static void pipe_advance(struct iov_iter *i, size_t size) static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) { - struct bvec_iter bi; + const struct bio_vec *bvec, *end; - bi.bi_size = i->count; - bi.bi_bvec_done = i->iov_offset; - bi.bi_idx = 0; - bvec_iter_advance(i->bvec, &bi, size); + if (!i->count) + return; + i->count -= size; + + size += i->iov_offset; - i->bvec += 
bi.bi_idx; - i->nr_segs -= bi.bi_idx; - i->count = bi.bi_size; - i->iov_offset = bi.bi_bvec_done; + for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { + if (likely(size < bvec->bv_len)) + break; + size -= bvec->bv_len; + } + i->iov_offset = size; + i->nr_segs -= bvec - i->bvec; + i->bvec = bvec; } static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) @@ -1268,6 +1097,98 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) } EXPORT_SYMBOL(iov_iter_discard); +static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, + unsigned len_mask) +{ + size_t size = i->count; + size_t skip = i->iov_offset; + unsigned k; + + for (k = 0; k < i->nr_segs; k++, skip = 0) { + size_t len = i->iov[k].iov_len - skip; + + if (len > size) + len = size; + if (len & len_mask) + return false; + if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask) + return false; + + size -= len; + if (!size) + break; + } + return true; +} + +static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, + unsigned len_mask) +{ + size_t size = i->count; + unsigned skip = i->iov_offset; + unsigned k; + + for (k = 0; k < i->nr_segs; k++, skip = 0) { + size_t len = i->bvec[k].bv_len - skip; + + if (len > size) + len = size; + if (len & len_mask) + return false; + if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) + return false; + + size -= len; + if (!size) + break; + } + return true; +} + +/** + * iov_iter_is_aligned() - Check if the addresses and lengths of each segments + * are aligned to the parameters. + * + * @i: &struct iov_iter to restore + * @addr_mask: bit mask to check against the iov element's addresses + * @len_mask: bit mask to check against the iov element's lengths + * + * Return: false if any addresses or lengths intersect with the provided masks + */ +bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, + unsigned len_mask) +{ + if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) + return iov_iter_aligned_iovec(i, addr_mask, len_mask); + + if (iov_iter_is_bvec(i)) + return iov_iter_aligned_bvec(i, addr_mask, len_mask); + + if (iov_iter_is_pipe(i)) { + unsigned int p_mask = i->pipe->ring_size - 1; + size_t size = i->count; + + if (size & len_mask) + return false; + if (size && allocated(&i->pipe->bufs[i->head & p_mask])) { + if (i->iov_offset & addr_mask) + return false; + } + + return true; + } + + if (iov_iter_is_xarray(i)) { + if (i->count & len_mask) + return false; + if ((i->xarray_start + i->iov_offset) & addr_mask) + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(iov_iter_is_aligned); + static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) { unsigned long res = 0; @@ -1434,7 +1355,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, { unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size || !maxpages) @@ -1461,57 +1382,40 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 
0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } /* must be done on non-empty ITER_IOVEC one */ -static unsigned long first_iovec_segment(const struct iov_iter *i, - size_t *size, size_t *start, - size_t maxsize, unsigned maxpages) +static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) { size_t skip; long k; for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { - unsigned long addr = (unsigned long)i->iov[k].iov_base + skip; size_t len = i->iov[k].iov_len - skip; if (unlikely(!len)) continue; - if (len > maxsize) - len = maxsize; - len += (*start = addr % PAGE_SIZE); - if (len > maxpages * PAGE_SIZE) - len = maxpages * PAGE_SIZE; - *size = len; - return addr & PAGE_MASK; + if (*size > len) + *size = len; + return (unsigned long)i->iov[k].iov_base + skip; } BUG(); // if it had been empty, we wouldn't get called } /* must be done on non-empty ITER_BVEC one */ static struct page *first_bvec_segment(const struct iov_iter *i, - size_t *size, size_t *start, - size_t maxsize, unsigned maxpages) + size_t *size, size_t *start) { struct page *page; size_t skip = i->iov_offset, len; len = i->bvec->bv_len - skip; - if (len > maxsize) - len = maxsize; + if (*size > len) + *size = len; skip += i->bvec->bv_offset; page = i->bvec->bv_page + skip / PAGE_SIZE; - len += (*start = skip % PAGE_SIZE); - if (len > maxpages * PAGE_SIZE) - len = maxpages * PAGE_SIZE; - *size = len; + *start = skip % PAGE_SIZE; return page; } @@ -1519,13 +1423,14 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) { - size_t len; int n, res; if (maxsize > i->count) maxsize = i->count; if (!maxsize) return 0; + if (maxsize > MAX_RW_COUNT) + maxsize = MAX_RW_COUNT; if (likely(iter_is_iovec(i))) { unsigned int gup_flags = 0; @@ -1536,21 +1441,27 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, if (i->nofault) gup_flags |= FOLL_NOFAULT; - addr = first_iovec_segment(i, &len, start, maxsize, maxpages); - n = DIV_ROUND_UP(len, PAGE_SIZE); + addr = first_iovec_segment(i, &maxsize); + *start = addr % PAGE_SIZE; + addr &= PAGE_MASK; + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); + if (n > maxpages) + n = maxpages; res = get_user_pages_fast(addr, n, gup_flags, pages); if (unlikely(res <= 0)) return res; - return (res == n ? len : res * PAGE_SIZE) - *start; + return min_t(size_t, maxsize, res * PAGE_SIZE - *start); } if (iov_iter_is_bvec(i)) { struct page *page; - page = first_bvec_segment(i, &len, start, maxsize, maxpages); - n = DIV_ROUND_UP(len, PAGE_SIZE); - while (n--) + page = first_bvec_segment(i, &maxsize, start); + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); + if (n > maxpages) + n = maxpages; + for (int k = 0; k < n; k++) get_page(*pages++ = page++); - return len - *start; + return min_t(size_t, maxsize, n * PAGE_SIZE - *start); } if (iov_iter_is_pipe(i)) return pipe_get_pages(i, pages, maxsize, maxpages, start); @@ -1602,7 +1513,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, struct page **p; unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size) @@ -1631,13 +1542,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 
0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, @@ -1645,13 +1550,14 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, size_t *start) { struct page **p; - size_t len; int n, res; if (maxsize > i->count) maxsize = i->count; if (!maxsize) return 0; + if (maxsize > MAX_RW_COUNT) + maxsize = MAX_RW_COUNT; if (likely(iter_is_iovec(i))) { unsigned int gup_flags = 0; @@ -1662,8 +1568,10 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, if (i->nofault) gup_flags |= FOLL_NOFAULT; - addr = first_iovec_segment(i, &len, start, maxsize, ~0U); - n = DIV_ROUND_UP(len, PAGE_SIZE); + addr = first_iovec_segment(i, &maxsize); + *start = addr % PAGE_SIZE; + addr &= PAGE_MASK; + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); p = get_pages_array(n); if (!p) return -ENOMEM; @@ -1674,19 +1582,19 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, return res; } *pages = p; - return (res == n ? len : res * PAGE_SIZE) - *start; + return min_t(size_t, maxsize, res * PAGE_SIZE - *start); } if (iov_iter_is_bvec(i)) { struct page *page; - page = first_bvec_segment(i, &len, start, maxsize, ~0U); - n = DIV_ROUND_UP(len, PAGE_SIZE); + page = first_bvec_segment(i, &maxsize, start); + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); *pages = p = get_pages_array(n); if (!p) return -ENOMEM; - while (n--) + for (int k = 0; k < n; k++) get_page(*p++ = page++); - return len - *start; + return min_t(size_t, maxsize, n * PAGE_SIZE - *start); } if (iov_iter_is_pipe(i)) return pipe_get_pages_alloc(i, pages, maxsize, start); diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 886510d248e5..08c14019841a 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -340,7 +340,7 @@ EXPORT_SYMBOL(kstrtos8); * @s: input string * @res: result * - * This routine returns 0 iff the first character is one of 'Yy1Nn0', or + * This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value * pointed to by res is updated upon finding a match. */ @@ -353,11 +353,15 @@ int kstrtobool(const char *s, bool *res) switch (s[0]) { case 'y': case 'Y': + case 't': + case 'T': case '1': *res = true; return 0; case 'n': case 'N': + case 'f': + case 'F': case '0': *res = false; return 0; diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile index c49f4ffb6273..29aff6562b42 100644 --- a/lib/kunit/Makefile +++ b/lib/kunit/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_KUNIT) += kunit.o kunit-objs += test.o \ + resource.o \ string-stream.o \ assert.o \ try-catch.o \ diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c index b71db0abc12b..1048ef1b8d6e 100644 --- a/lib/kunit/debugfs.c +++ b/lib/kunit/debugfs.c @@ -52,7 +52,7 @@ static void debugfs_print_result(struct seq_file *seq, static int debugfs_print_results(struct seq_file *seq, void *v) { struct kunit_suite *suite = (struct kunit_suite *)seq->private; - bool success = kunit_suite_has_succeeded(suite); + enum kunit_status success = kunit_suite_has_succeeded(suite); struct kunit_case *test_case; if (!suite || !suite->log) diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 22640c9ee819..5e223327196a 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -9,8 +9,8 @@ * These symbols point to the .kunit_test_suites section and are defined in * include/asm-generic/vmlinux.lds.h, and consequently must be extern. 
*/ -extern struct kunit_suite * const * const __kunit_suites_start[]; -extern struct kunit_suite * const * const __kunit_suites_end[]; +extern struct kunit_suite * const __kunit_suites_start[]; +extern struct kunit_suite * const __kunit_suites_end[]; #if IS_BUILTIN(CONFIG_KUNIT) @@ -55,7 +55,7 @@ static void kunit_parse_filter_glob(struct kunit_test_filter *parsed, /* Create a copy of suite with only tests that match test_glob. */ static struct kunit_suite * -kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob) +kunit_filter_tests(const struct kunit_suite *const suite, const char *test_glob) { int n = 0; struct kunit_case *filtered, *test_case; @@ -69,11 +69,15 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob) if (n == 0) return NULL; - /* Use memcpy to workaround copy->name being const. */ - copy = kmalloc(sizeof(*copy), GFP_KERNEL); - memcpy(copy, suite, sizeof(*copy)); + copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL); + if (!copy) + return ERR_PTR(-ENOMEM); filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL); + if (!filtered) { + kfree(copy); + return ERR_PTR(-ENOMEM); + } n = 0; kunit_suite_for_each_test_case(suite, test_case) { @@ -88,68 +92,27 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob) static char *kunit_shutdown; core_param(kunit_shutdown, kunit_shutdown, charp, 0644); -static struct kunit_suite * const * -kunit_filter_subsuite(struct kunit_suite * const * const subsuite, - struct kunit_test_filter *filter) -{ - int i, n = 0; - struct kunit_suite **filtered, *filtered_suite; - - n = 0; - for (i = 0; subsuite[i]; ++i) { - if (glob_match(filter->suite_glob, subsuite[i]->name)) - ++n; - } - - if (n == 0) - return NULL; - - filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL); - if (!filtered) - return NULL; - - n = 0; - for (i = 0; subsuite[i] != NULL; ++i) { - if (!glob_match(filter->suite_glob, subsuite[i]->name)) - continue; - filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob); - if (filtered_suite) - filtered[n++] = filtered_suite; - } - filtered[n] = NULL; - - return filtered; -} - +/* Stores an array of suites, end points one past the end */ struct suite_set { - struct kunit_suite * const * const *start; - struct kunit_suite * const * const *end; + struct kunit_suite * const *start; + struct kunit_suite * const *end; }; -static void kunit_free_subsuite(struct kunit_suite * const *subsuite) -{ - unsigned int i; - - for (i = 0; subsuite[i]; i++) - kfree(subsuite[i]); - - kfree(subsuite); -} - static void kunit_free_suite_set(struct suite_set suite_set) { - struct kunit_suite * const * const *suites; + struct kunit_suite * const *suites; for (suites = suite_set.start; suites < suite_set.end; suites++) - kunit_free_subsuite(*suites); + kfree(*suites); kfree(suite_set.start); } static struct suite_set kunit_filter_suites(const struct suite_set *suite_set, - const char *filter_glob) + const char *filter_glob, + int *err) { int i; - struct kunit_suite * const **copy, * const *filtered_subsuite; + struct kunit_suite **copy, *filtered_suite; struct suite_set filtered; struct kunit_test_filter filter; @@ -164,10 +127,19 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set, kunit_parse_filter_glob(&filter, filter_glob); - for (i = 0; i < max; ++i) { - filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter); - if (filtered_subsuite) - *copy++ = filtered_subsuite; + for (i = 0; &suite_set->start[i] != suite_set->end; i++) { + if 
(!glob_match(filter.suite_glob, suite_set->start[i]->name)) + continue; + + filtered_suite = kunit_filter_tests(suite_set->start[i], filter.test_glob); + if (IS_ERR(filtered_suite)) { + *err = PTR_ERR(filtered_suite); + return filtered; + } + if (!filtered_suite) + continue; + + *copy++ = filtered_suite; } filtered.end = copy; @@ -190,55 +162,42 @@ static void kunit_handle_shutdown(void) } -static void kunit_print_tap_header(struct suite_set *suite_set) -{ - struct kunit_suite * const * const *suites, * const *subsuite; - int num_of_suites = 0; - - for (suites = suite_set->start; suites < suite_set->end; suites++) - for (subsuite = *suites; *subsuite != NULL; subsuite++) - num_of_suites++; - - pr_info("TAP version 14\n"); - pr_info("1..%d\n", num_of_suites); -} - static void kunit_exec_run_tests(struct suite_set *suite_set) { - struct kunit_suite * const * const *suites; + size_t num_suites = suite_set->end - suite_set->start; - kunit_print_tap_header(suite_set); + pr_info("TAP version 14\n"); + pr_info("1..%zu\n", num_suites); - for (suites = suite_set->start; suites < suite_set->end; suites++) - __kunit_test_suites_init(*suites); + __kunit_test_suites_init(suite_set->start, num_suites); } static void kunit_exec_list_tests(struct suite_set *suite_set) { - unsigned int i; - struct kunit_suite * const * const *suites; + struct kunit_suite * const *suites; struct kunit_case *test_case; /* Hack: print a tap header so kunit.py can find the start of KUnit output. */ pr_info("TAP version 14\n"); for (suites = suite_set->start; suites < suite_set->end; suites++) - for (i = 0; (*suites)[i] != NULL; i++) { - kunit_suite_for_each_test_case((*suites)[i], test_case) { - pr_info("%s.%s\n", (*suites)[i]->name, test_case->name); - } + kunit_suite_for_each_test_case((*suites), test_case) { + pr_info("%s.%s\n", (*suites)->name, test_case->name); } } int kunit_run_all_tests(void) { - struct suite_set suite_set = { - .start = __kunit_suites_start, - .end = __kunit_suites_end, - }; - - if (filter_glob_param) - suite_set = kunit_filter_suites(&suite_set, filter_glob_param); + struct suite_set suite_set = {__kunit_suites_start, __kunit_suites_end}; + int err = 0; + + if (filter_glob_param) { + suite_set = kunit_filter_suites(&suite_set, filter_glob_param, &err); + if (err) { + pr_err("kunit executor: error filtering suites: %d\n", err); + goto out; + } + } if (!action_param) kunit_exec_run_tests(&suite_set); @@ -247,13 +206,13 @@ int kunit_run_all_tests(void) else pr_err("kunit executor: unknown action '%s'\n", action_param); - if (filter_glob_param) { /* a copy was made of each array */ + if (filter_glob_param) { /* a copy was made of each suite */ kunit_free_suite_set(suite_set); } +out: kunit_handle_shutdown(); - - return 0; + return err; } #if IS_BUILTIN(CONFIG_KUNIT_TEST) diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 4ed57fd94e42..0cea31c27b23 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -9,8 +9,6 @@ #include <kunit/test.h> static void kfree_at_end(struct kunit *test, const void *to_free); -static void free_subsuite_at_end(struct kunit *test, - struct kunit_suite *const *to_free); static struct kunit_suite *alloc_fake_suite(struct kunit *test, const char *suite_name, struct kunit_case *test_cases); @@ -41,124 +39,80 @@ static void parse_filter_test(struct kunit *test) kfree(filter.test_glob); } -static void filter_subsuite_test(struct kunit *test) +static void filter_suites_test(struct kunit *test) { - struct kunit_suite *subsuite[3] = {NULL, NULL, 
NULL}; - struct kunit_suite * const *filtered; - struct kunit_test_filter filter = { - .suite_glob = "suite2", - .test_glob = NULL, - }; + struct kunit_suite *subsuite[3] = {NULL, NULL}; + struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]}; + struct suite_set got; + int err = 0; subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases); /* Want: suite1, suite2, NULL -> suite2, NULL */ - filtered = kunit_filter_subsuite(subsuite, &filter); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered); - free_subsuite_at_end(test, filtered); + got = kunit_filter_suites(&suite_set, "suite2", &err); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); + KUNIT_ASSERT_EQ(test, err, 0); + kfree_at_end(test, got.start); /* Validate we just have suite2 */ - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]); - KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2"); - KUNIT_EXPECT_FALSE(test, filtered[1]); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]); + KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2"); + + /* Contains one element (end is 1 past end) */ + KUNIT_ASSERT_EQ(test, got.end - got.start, 1); } -static void filter_subsuite_test_glob_test(struct kunit *test) +static void filter_suites_test_glob_test(struct kunit *test) { - struct kunit_suite *subsuite[3] = {NULL, NULL, NULL}; - struct kunit_suite * const *filtered; - struct kunit_test_filter filter = { - .suite_glob = "suite2", - .test_glob = "test2", - }; + struct kunit_suite *subsuite[3] = {NULL, NULL}; + struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]}; + struct suite_set got; + int err = 0; subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases); /* Want: suite1, suite2, NULL -> suite2 (just test1), NULL */ - filtered = kunit_filter_subsuite(subsuite, &filter); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered); - free_subsuite_at_end(test, filtered); + got = kunit_filter_suites(&suite_set, "suite2.test2", &err); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); + KUNIT_ASSERT_EQ(test, err, 0); + kfree_at_end(test, got.start); /* Validate we just have suite2 */ - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]); - KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2"); - KUNIT_EXPECT_FALSE(test, filtered[1]); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]); + KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2"); + KUNIT_ASSERT_EQ(test, got.end - got.start, 1); /* Now validate we just have test2 */ - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]->test_cases); - KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->test_cases[0].name, "test2"); - KUNIT_EXPECT_FALSE(test, filtered[0]->test_cases[1].name); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases); + KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->test_cases[0].name, "test2"); + KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name); } -static void filter_subsuite_to_empty_test(struct kunit *test) +static void filter_suites_to_empty_test(struct kunit *test) { - struct kunit_suite *subsuite[3] = {NULL, NULL, NULL}; - struct kunit_suite * const *filtered; - struct kunit_test_filter filter = { - .suite_glob = "not_found", - .test_glob = NULL, - }; + struct kunit_suite *subsuite[3] = {NULL, NULL}; + struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]}; + struct suite_set got; + int err = 0; subsuite[0] = 
alloc_fake_suite(test, "suite1", dummy_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases); - filtered = kunit_filter_subsuite(subsuite, &filter); - free_subsuite_at_end(test, filtered); /* just in case */ + got = kunit_filter_suites(&suite_set, "not_found", &err); + KUNIT_ASSERT_EQ(test, err, 0); + kfree_at_end(test, got.start); /* just in case */ - KUNIT_EXPECT_FALSE_MSG(test, filtered, - "should be NULL to indicate no match"); -} - -static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_set) -{ - struct kunit_suite * const * const *suites; - - kfree_at_end(test, suite_set->start); - for (suites = suite_set->start; suites < suite_set->end; suites++) - free_subsuite_at_end(test, *suites); -} - -static void filter_suites_test(struct kunit *test) -{ - /* Suites per-file are stored as a NULL terminated array */ - struct kunit_suite *subsuites[2][2] = { - {NULL, NULL}, - {NULL, NULL}, - }; - /* Match the memory layout of suite_set */ - struct kunit_suite * const * const suites[2] = { - subsuites[0], subsuites[1], - }; - - const struct suite_set suite_set = { - .start = suites, - .end = suites + 2, - }; - struct suite_set filtered = {.start = NULL, .end = NULL}; - - /* Emulate two files, each having one suite */ - subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases); - subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases); - - /* Filter out suite1 */ - filtered = kunit_filter_suites(&suite_set, "suite0"); - kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */ - KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1); - - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0][0]); - KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0"); + KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end, + "should be empty to indicate no match"); } static struct kunit_case executor_test_cases[] = { KUNIT_CASE(parse_filter_test), - KUNIT_CASE(filter_subsuite_test), - KUNIT_CASE(filter_subsuite_test_glob_test), - KUNIT_CASE(filter_subsuite_to_empty_test), KUNIT_CASE(filter_suites_test), + KUNIT_CASE(filter_suites_test_glob_test), + KUNIT_CASE(filter_suites_to_empty_test), {} }; @@ -188,20 +142,6 @@ static void kfree_at_end(struct kunit *test, const void *to_free) (void *)to_free); } -static void free_subsuite_res_free(struct kunit_resource *res) -{ - kunit_free_subsuite(res->data); -} - -static void free_subsuite_at_end(struct kunit *test, - struct kunit_suite *const *to_free) -{ - if (IS_ERR_OR_NULL(to_free)) - return; - kunit_alloc_resource(test, NULL, free_subsuite_res_free, - GFP_KERNEL, (void *)to_free); -} - static struct kunit_suite *alloc_fake_suite(struct kunit *test, const char *suite_name, struct kunit_case *test_cases) diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c index 4bbf37c04eba..f8fe582c9e36 100644 --- a/lib/kunit/kunit-example-test.c +++ b/lib/kunit/kunit-example-test.c @@ -41,6 +41,17 @@ static int example_test_init(struct kunit *test) } /* + * This is run once before all test cases in the suite. + * See the comment on example_test_suite for more information. + */ +static int example_test_init_suite(struct kunit_suite *suite) +{ + kunit_info(suite, "initializing suite\n"); + + return 0; +} + +/* * This test should always be skipped. 
*/ static void example_skip_test(struct kunit *test) @@ -91,6 +102,8 @@ static void example_all_expect_macros_test(struct kunit *test) KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test); KUNIT_EXPECT_PTR_EQ(test, NULL, NULL); KUNIT_EXPECT_PTR_NE(test, test, NULL); + KUNIT_EXPECT_NULL(test, NULL); + KUNIT_EXPECT_NOT_NULL(test, test); /* String assertions */ KUNIT_EXPECT_STREQ(test, "hi", "hi"); @@ -140,17 +153,20 @@ static struct kunit_case example_test_cases[] = { * may be specified which runs after every test case and can be used to for * cleanup. For clarity, running tests in a test suite would behave as follows: * + * suite.suite_init(suite); * suite.init(test); * suite.test_case[0](test); * suite.exit(test); * suite.init(test); * suite.test_case[1](test); * suite.exit(test); + * suite.suite_exit(suite); * ...; */ static struct kunit_suite example_test_suite = { .name = "example", .init = example_test_init, + .suite_init = example_test_init_suite, .test_cases = example_test_cases, }; diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 555601d17f79..13d0bd8b07a9 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -190,6 +190,40 @@ static void kunit_resource_test_destroy_resource(struct kunit *test) KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); } +static void kunit_resource_test_remove_resource(struct kunit *test) +{ + struct kunit_test_resource_context *ctx = test->priv; + struct kunit_resource *res = kunit_alloc_and_get_resource( + &ctx->test, + fake_resource_init, + fake_resource_free, + GFP_KERNEL, + ctx); + + /* The resource is in the list */ + KUNIT_EXPECT_FALSE(test, list_empty(&ctx->test.resources)); + + /* Remove the resource. The pointer is still valid, but it can't be + * found. + */ + kunit_remove_resource(test, res); + KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); + /* We haven't been freed yet. */ + KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized); + + /* Removing the resource multiple times is valid. */ + kunit_remove_resource(test, res); + KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); + /* Despite having been removed twice (from only one reference), the + * resource still has not been freed. + */ + KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized); + + /* Free the resource. */ + kunit_put_resource(res); + KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized); +} + static void kunit_resource_test_cleanup_resources(struct kunit *test) { int i; @@ -387,6 +421,7 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_init_resources), KUNIT_CASE(kunit_resource_test_alloc_resource), KUNIT_CASE(kunit_resource_test_destroy_resource), + KUNIT_CASE(kunit_resource_test_remove_resource), KUNIT_CASE(kunit_resource_test_cleanup_resources), KUNIT_CASE(kunit_resource_test_proper_free_ordering), KUNIT_CASE(kunit_resource_test_static), @@ -435,7 +470,7 @@ static void kunit_log_test(struct kunit *test) KUNIT_EXPECT_NOT_ERR_OR_NULL(test, strstr(suite.log, "along with this.")); #else - KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL); + KUNIT_EXPECT_NULL(test, test->log); #endif } diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c new file mode 100644 index 000000000000..c414df922f34 --- /dev/null +++ b/lib/kunit/resource.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit resource API for test managed resources (allocations, etc.). + * + * Copyright (C) 2022, Google LLC. 
+ * Author: Daniel Latypov <dlatypov@google.com> + */ + +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/kref.h> + +/* + * Used for static resources and when a kunit_resource * has been created by + * kunit_alloc_resource(). When an init function is supplied, @data is passed + * into the init function; otherwise, we simply set the resource data field to + * the data value passed in. Doesn't initialize res->should_kfree. + */ +int __kunit_add_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + void *data) +{ + int ret = 0; + unsigned long flags; + + res->free = free; + kref_init(&res->refcount); + + if (init) { + ret = init(res, data); + if (ret) + return ret; + } else { + res->data = data; + } + + spin_lock_irqsave(&test->lock, flags); + list_add_tail(&res->node, &test->resources); + /* refcount for list is established by kref_init() */ + spin_unlock_irqrestore(&test->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(__kunit_add_resource); + +void kunit_remove_resource(struct kunit *test, struct kunit_resource *res) +{ + unsigned long flags; + bool was_linked; + + spin_lock_irqsave(&test->lock, flags); + was_linked = !list_empty(&res->node); + list_del_init(&res->node); + spin_unlock_irqrestore(&test->lock, flags); + + if (was_linked) + kunit_put_resource(res); +} +EXPORT_SYMBOL_GPL(kunit_remove_resource); + +int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, + void *match_data) +{ + struct kunit_resource *res = kunit_find_resource(test, match, + match_data); + + if (!res) + return -ENOENT; + + kunit_remove_resource(test, res); + + /* We have a reference also via _find(); drop it. */ + kunit_put_resource(res); + + return 0; +} +EXPORT_SYMBOL_GPL(kunit_destroy_resource); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 3bca3bf5c15b..b73d5bb5c473 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -6,11 +6,13 @@ * Author: Brendan Higgins <brendanhiggins@google.com> */ +#include <kunit/resource.h> #include <kunit/test.h> #include <kunit/test-bug.h> #include <linux/kernel.h> -#include <linux/kref.h> +#include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/panic.h> #include <linux/sched/debug.h> #include <linux/sched.h> @@ -134,7 +136,7 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite) } EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases); -static void kunit_print_subtest_start(struct kunit_suite *suite) +static void kunit_print_suite_start(struct kunit_suite *suite) { kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s", suite->name); @@ -179,6 +181,9 @@ enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite) const struct kunit_case *test_case; enum kunit_status status = KUNIT_SKIPPED; + if (suite->suite_init_err) + return KUNIT_FAILURE; + kunit_suite_for_each_test_case(suite, test_case) { if (test_case->status == KUNIT_FAILURE) return KUNIT_FAILURE; @@ -192,7 +197,7 @@ EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded); static size_t kunit_suite_counter = 1; -static void kunit_print_subtest_end(struct kunit_suite *suite) +static void kunit_print_suite_end(struct kunit_suite *suite) { kunit_print_ok_not_ok((void *)suite, false, kunit_suite_has_succeeded(suite), @@ -241,7 +246,7 @@ static void kunit_print_string_stream(struct kunit *test, } static void kunit_fail(struct kunit *test, const struct kunit_loc *loc, - enum kunit_assert_type type, struct kunit_assert *assert, + enum kunit_assert_type type, const struct 
kunit_assert *assert, const struct va_format *message) { struct string_stream *stream; @@ -281,7 +286,7 @@ static void __noreturn kunit_abort(struct kunit *test) void kunit_do_failed_assertion(struct kunit *test, const struct kunit_loc *loc, enum kunit_assert_type type, - struct kunit_assert *assert, + const struct kunit_assert *assert, const char *fmt, ...) { va_list args; @@ -498,7 +503,19 @@ int kunit_run_tests(struct kunit_suite *suite) struct kunit_result_stats suite_stats = { 0 }; struct kunit_result_stats total_stats = { 0 }; - kunit_print_subtest_start(suite); + /* Taint the kernel so we know we've run tests. */ + add_taint(TAINT_TEST, LOCKDEP_STILL_OK); + + if (suite->suite_init) { + suite->suite_init_err = suite->suite_init(suite); + if (suite->suite_init_err) { + kunit_err(suite, KUNIT_SUBTEST_INDENT + "# failed to initialize (%d)", suite->suite_init_err); + goto suite_end; + } + } + + kunit_print_suite_start(suite); kunit_suite_for_each_test_case(suite, test_case) { struct kunit test = { .param_value = NULL, .param_index = 0 }; @@ -551,8 +568,12 @@ int kunit_run_tests(struct kunit_suite *suite) kunit_accumulate_stats(&total_stats, param_stats); } + if (suite->suite_exit) + suite->suite_exit(suite); + kunit_print_suite_stats(suite, suite_stats, total_stats); - kunit_print_subtest_end(suite); +suite_end: + kunit_print_suite_end(suite); return 0; } @@ -562,13 +583,14 @@ static void kunit_init_suite(struct kunit_suite *suite) { kunit_debugfs_create_suite(suite); suite->status_comment[0] = '\0'; + suite->suite_init_err = 0; } -int __kunit_test_suites_init(struct kunit_suite * const * const suites) +int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites) { unsigned int i; - for (i = 0; suites[i] != NULL; i++) { + for (i = 0; i < num_suites; i++) { kunit_init_suite(suites[i]); kunit_run_tests(suites[i]); } @@ -581,130 +603,53 @@ static void kunit_exit_suite(struct kunit_suite *suite) kunit_debugfs_destroy_suite(suite); } -void __kunit_test_suites_exit(struct kunit_suite **suites) +void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites) { unsigned int i; - for (i = 0; suites[i] != NULL; i++) + for (i = 0; i < num_suites; i++) kunit_exit_suite(suites[i]); kunit_suite_counter = 1; } EXPORT_SYMBOL_GPL(__kunit_test_suites_exit); -/* - * Used for static resources and when a kunit_resource * has been created by - * kunit_alloc_resource(). When an init function is supplied, @data is passed - * into the init function; otherwise, we simply set the resource data field to - * the data value passed in. 
- */ -int kunit_add_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - struct kunit_resource *res, - void *data) +#ifdef CONFIG_MODULES +static void kunit_module_init(struct module *mod) { - int ret = 0; - unsigned long flags; - - res->free = free; - kref_init(&res->refcount); - - if (init) { - ret = init(res, data); - if (ret) - return ret; - } else { - res->data = data; - } - - spin_lock_irqsave(&test->lock, flags); - list_add_tail(&res->node, &test->resources); - /* refcount for list is established by kref_init() */ - spin_unlock_irqrestore(&test->lock, flags); - - return ret; + __kunit_test_suites_init(mod->kunit_suites, mod->num_kunit_suites); } -EXPORT_SYMBOL_GPL(kunit_add_resource); -int kunit_add_named_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - struct kunit_resource *res, - const char *name, - void *data) +static void kunit_module_exit(struct module *mod) { - struct kunit_resource *existing; - - if (!name) - return -EINVAL; - - existing = kunit_find_named_resource(test, name); - if (existing) { - kunit_put_resource(existing); - return -EEXIST; - } - - res->name = name; - - return kunit_add_resource(test, init, free, res, data); + __kunit_test_suites_exit(mod->kunit_suites, mod->num_kunit_suites); } -EXPORT_SYMBOL_GPL(kunit_add_named_resource); -struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - gfp_t internal_gfp, - void *data) +static int kunit_module_notify(struct notifier_block *nb, unsigned long val, + void *data) { - struct kunit_resource *res; - int ret; + struct module *mod = data; - res = kzalloc(sizeof(*res), internal_gfp); - if (!res) - return NULL; - - ret = kunit_add_resource(test, init, free, res, data); - if (!ret) { - /* - * bump refcount for get; kunit_resource_put() should be called - * when done. - */ - kunit_get_resource(res); - return res; + switch (val) { + case MODULE_STATE_LIVE: + kunit_module_init(mod); + break; + case MODULE_STATE_GOING: + kunit_module_exit(mod); + break; + case MODULE_STATE_COMING: + case MODULE_STATE_UNFORMED: + break; } - return NULL; -} -EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource); - -void kunit_remove_resource(struct kunit *test, struct kunit_resource *res) -{ - unsigned long flags; - - spin_lock_irqsave(&test->lock, flags); - list_del(&res->node); - spin_unlock_irqrestore(&test->lock, flags); - kunit_put_resource(res); -} -EXPORT_SYMBOL_GPL(kunit_remove_resource); - -int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, - void *match_data) -{ - struct kunit_resource *res = kunit_find_resource(test, match, - match_data); - - if (!res) - return -ENOENT; - - kunit_remove_resource(test, res); - - /* We have a reference also via _find(); drop it. 
*/ - kunit_put_resource(res); return 0; } -EXPORT_SYMBOL_GPL(kunit_destroy_resource); + +static struct notifier_block kunit_mod_nb = { + .notifier_call = kunit_module_notify, + .priority = 0, +}; +#endif struct kunit_kmalloc_array_params { size_t n; @@ -800,13 +745,19 @@ EXPORT_SYMBOL_GPL(kunit_cleanup); static int __init kunit_init(void) { kunit_debugfs_init(); - +#ifdef CONFIG_MODULES + return register_module_notifier(&kunit_mod_nb); +#else return 0; +#endif } late_initcall(kunit_init); static void __exit kunit_exit(void) { +#ifdef CONFIG_MODULES + unregister_module_notifier(&kunit_mod_nb); +#endif kunit_debugfs_cleanup(); } module_exit(kunit_exit); diff --git a/lib/list-test.c b/lib/list-test.c index 035ef6597640..d374cf5d1a57 100644 --- a/lib/list-test.c +++ b/lib/list-test.c @@ -804,6 +804,401 @@ static struct kunit_suite list_test_module = { .test_cases = list_test_cases, }; -kunit_test_suites(&list_test_module); +struct hlist_test_struct { + int data; + struct hlist_node list; +}; + +static void hlist_test_init(struct kunit *test) +{ + /* Test the different ways of initialising a list. */ + struct hlist_head list1 = HLIST_HEAD_INIT; + struct hlist_head list2; + HLIST_HEAD(list3); + struct hlist_head *list4; + struct hlist_head *list5; + + INIT_HLIST_HEAD(&list2); + + list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL); + INIT_HLIST_HEAD(list4); + + list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL); + memset(list5, 0xFF, sizeof(*list5)); + INIT_HLIST_HEAD(list5); + + KUNIT_EXPECT_TRUE(test, hlist_empty(&list1)); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list2)); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list3)); + KUNIT_EXPECT_TRUE(test, hlist_empty(list4)); + KUNIT_EXPECT_TRUE(test, hlist_empty(list5)); + + kfree(list4); + kfree(list5); +} + +static void hlist_test_unhashed(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list); + + INIT_HLIST_NODE(&a); + + /* is unhashed by default */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a)); + + hlist_add_head(&a, &list); + + /* is hashed once added to list */ + KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a)); + + hlist_del_init(&a); + + /* is again unhashed after del_init */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a)); +} + +/* Doesn't test concurrency guarantees */ +static void hlist_test_unhashed_lockless(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list); + + INIT_HLIST_NODE(&a); + + /* is unhashed by default */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a)); + + hlist_add_head(&a, &list); + + /* is hashed once added to list */ + KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a)); + + hlist_del_init(&a); + + /* is again unhashed after del_init */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a)); +} + +static void hlist_test_del(struct kunit *test) +{ + struct hlist_node a, b; + HLIST_HEAD(list); + + hlist_add_head(&a, &list); + hlist_add_behind(&b, &a); + + /* before: [list] -> a -> b */ + hlist_del(&a); + + /* now: [list] -> b */ + KUNIT_EXPECT_PTR_EQ(test, list.first, &b); + KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first); +} + +static void hlist_test_del_init(struct kunit *test) +{ + struct hlist_node a, b; + HLIST_HEAD(list); + + hlist_add_head(&a, &list); + hlist_add_behind(&b, &a); + + /* before: [list] -> a -> b */ + hlist_del_init(&a); + + /* now: [list] -> b */ + KUNIT_EXPECT_PTR_EQ(test, list.first, &b); + KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first); + + /* a is now initialised */ + KUNIT_EXPECT_PTR_EQ(test, a.next, NULL); + KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL); +} + +/* 
Tests all three hlist_add_* functions */ +static void hlist_test_add(struct kunit *test) +{ + struct hlist_node a, b, c, d; + HLIST_HEAD(list); + + hlist_add_head(&a, &list); + hlist_add_head(&b, &list); + hlist_add_before(&c, &a); + hlist_add_behind(&d, &a); + + /* should be [list] -> b -> c -> a -> d */ + KUNIT_EXPECT_PTR_EQ(test, list.first, &b); + + KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next)); + KUNIT_EXPECT_PTR_EQ(test, b.next, &c); + + KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next)); + KUNIT_EXPECT_PTR_EQ(test, c.next, &a); + + KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next)); + KUNIT_EXPECT_PTR_EQ(test, a.next, &d); +} + +/* Tests both hlist_fake() and hlist_add_fake() */ +static void hlist_test_fake(struct kunit *test) +{ + struct hlist_node a; + + INIT_HLIST_NODE(&a); + + /* not fake after init */ + KUNIT_EXPECT_FALSE(test, hlist_fake(&a)); + + hlist_add_fake(&a); + + /* is now fake */ + KUNIT_EXPECT_TRUE(test, hlist_fake(&a)); +} + +static void hlist_test_is_singular_node(struct kunit *test) +{ + struct hlist_node a, b; + HLIST_HEAD(list); + + INIT_HLIST_NODE(&a); + KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list)); + + hlist_add_head(&a, &list); + KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list)); + + hlist_add_head(&b, &list); + KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list)); + KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list)); +} + +static void hlist_test_empty(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list); + + /* list starts off empty */ + KUNIT_EXPECT_TRUE(test, hlist_empty(&list)); + + hlist_add_head(&a, &list); + + /* list is no longer empty */ + KUNIT_EXPECT_FALSE(test, hlist_empty(&list)); +} + +static void hlist_test_move_list(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list1); + HLIST_HEAD(list2); + + hlist_add_head(&a, &list1); + + KUNIT_EXPECT_FALSE(test, hlist_empty(&list1)); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list2)); + hlist_move_list(&list1, &list2); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list1)); + KUNIT_EXPECT_FALSE(test, hlist_empty(&list2)); + +} + +static void hlist_test_entry(struct kunit *test) +{ + struct hlist_test_struct test_struct; + + KUNIT_EXPECT_PTR_EQ(test, &test_struct, + hlist_entry(&(test_struct.list), + struct hlist_test_struct, list)); +} + +static void hlist_test_entry_safe(struct kunit *test) +{ + struct hlist_test_struct test_struct; + + KUNIT_EXPECT_PTR_EQ(test, &test_struct, + hlist_entry_safe(&(test_struct.list), + struct hlist_test_struct, list)); + + KUNIT_EXPECT_PTR_EQ(test, NULL, + hlist_entry_safe((struct hlist_node *)NULL, + struct hlist_test_struct, list)); +} + +static void hlist_test_for_each(struct kunit *test) +{ + struct hlist_node entries[3], *cur; + HLIST_HEAD(list); + int i = 0; + + hlist_add_head(&entries[0], &list); + hlist_add_behind(&entries[1], &entries[0]); + hlist_add_behind(&entries[2], &entries[1]); + + hlist_for_each(cur, &list) { + KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 3); +} + + +static void hlist_test_for_each_safe(struct kunit *test) +{ + struct hlist_node entries[3], *cur, *n; + HLIST_HEAD(list); + int i = 0; + + hlist_add_head(&entries[0], &list); + hlist_add_behind(&entries[1], &entries[0]); + hlist_add_behind(&entries[2], &entries[1]); + + hlist_for_each_safe(cur, n, &list) { + KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); + hlist_del(&entries[i]); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 3); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list)); +} + +static void 
hlist_test_for_each_entry(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + i = 0; + + hlist_for_each_entry(cur, &list, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); +} + +static void hlist_test_for_each_entry_continue(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + /* We skip the first (zero-th) entry. */ + i = 1; + + cur = &entries[0]; + hlist_for_each_entry_continue(cur, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + /* Stamp over the entry. */ + cur->data = 42; + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); + /* The first entry was not visited. */ + KUNIT_EXPECT_EQ(test, entries[0].data, 0); + /* The second (and presumably others), were. */ + KUNIT_EXPECT_EQ(test, entries[1].data, 42); +} + +static void hlist_test_for_each_entry_from(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + i = 0; + + cur = &entries[0]; + hlist_for_each_entry_from(cur, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + /* Stamp over the entry. */ + cur->data = 42; + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); + /* The first entry was visited. 
*/ + KUNIT_EXPECT_EQ(test, entries[0].data, 42); +} + +static void hlist_test_for_each_entry_safe(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + struct hlist_node *tmp_node; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + i = 0; + + hlist_for_each_entry_safe(cur, tmp_node, &list, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + hlist_del(&cur->list); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list)); +} + + +static struct kunit_case hlist_test_cases[] = { + KUNIT_CASE(hlist_test_init), + KUNIT_CASE(hlist_test_unhashed), + KUNIT_CASE(hlist_test_unhashed_lockless), + KUNIT_CASE(hlist_test_del), + KUNIT_CASE(hlist_test_del_init), + KUNIT_CASE(hlist_test_add), + KUNIT_CASE(hlist_test_fake), + KUNIT_CASE(hlist_test_is_singular_node), + KUNIT_CASE(hlist_test_empty), + KUNIT_CASE(hlist_test_move_list), + KUNIT_CASE(hlist_test_entry), + KUNIT_CASE(hlist_test_entry_safe), + KUNIT_CASE(hlist_test_for_each), + KUNIT_CASE(hlist_test_for_each_safe), + KUNIT_CASE(hlist_test_for_each_entry), + KUNIT_CASE(hlist_test_for_each_entry_continue), + KUNIT_CASE(hlist_test_for_each_entry_from), + KUNIT_CASE(hlist_test_for_each_entry_safe), + {}, +}; + +static struct kunit_suite hlist_test_module = { + .name = "hlist", + .test_cases = hlist_test_cases, +}; + +kunit_test_suites(&list_test_module, &hlist_test_module); MODULE_LICENSE("GPL v2"); diff --git a/lib/lockref.c b/lib/lockref.c index 5b34bbd3eba8..45e93ece8ba0 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -14,12 +14,11 @@ BUILD_BUG_ON(sizeof(old) != 8); \ old.lock_count = READ_ONCE(lockref->lock_count); \ while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ - struct lockref new = old, prev = old; \ + struct lockref new = old; \ CODE \ - old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \ - old.lock_count, \ - new.lock_count); \ - if (likely(old.lock_count == prev.lock_count)) { \ + if (likely(try_cmpxchg64_relaxed(&lockref->lock_count, \ + &old.lock_count, \ + new.lock_count))) { \ SUCCESS; \ } \ if (!--retry) \ @@ -112,31 +111,6 @@ int lockref_put_not_zero(struct lockref *lockref) EXPORT_SYMBOL(lockref_put_not_zero); /** - * lockref_get_or_lock - Increments count unless the count is 0 or dead - * @lockref: pointer to lockref structure - * Return: 1 if count updated successfully or 0 if count was zero - * and we got the lock instead. - */ -int lockref_get_or_lock(struct lockref *lockref) -{ - CMPXCHG_LOOP( - new.count++; - if (old.count <= 0) - break; - , - return 1; - ); - - spin_lock(&lockref->lock); - if (lockref->count <= 0) - return 0; - lockref->count++; - spin_unlock(&lockref->lock); - return 1; -} -EXPORT_SYMBOL(lockref_get_or_lock); - -/** * lockref_put_return - Decrement reference count if possible * @lockref: pointer to lockref structure * diff --git a/lib/memneq.c b/lib/memneq.c new file mode 100644 index 000000000000..fb11608b1ec1 --- /dev/null +++ b/lib/memneq.c @@ -0,0 +1,176 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan <james@openvpn.net> + * Daniel Borkmann <dborkman@redhat.com> + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <crypto/algapi.h> +#include <asm/unaligned.h> + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= get_unaligned((unsigned long *)a) ^ + get_unaligned((unsigned long *)b); + OPTIMIZER_HIDE_VAR(neq); + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + OPTIMIZER_HIDE_VAR(neq); + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ + unsigned long neq = 0; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) { + neq |= get_unaligned((unsigned long *)a) ^ + get_unaligned((unsigned long *)b); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned long *)(a + 8)) ^ + get_unaligned((unsigned long *)(b + 8)); + OPTIMIZER_HIDE_VAR(neq); + } else if (sizeof(unsigned int) == 4) { + neq |= get_unaligned((unsigned int *)a) ^ + get_unaligned((unsigned int *)b); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 4)) ^ + get_unaligned((unsigned int *)(b + 4)); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 8)) ^ + get_unaligned((unsigned int *)(b + 8)); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 12)) ^ + get_unaligned((unsigned int *)(b + 12)); + OPTIMIZER_HIDE_VAR(neq); + } else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); + OPTIMIZER_HIDE_VAR(neq); + } + + return neq; +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. 
+ */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} +EXPORT_SYMBOL(__crypto_memneq); + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/lib/mpi/mpi-add.c b/lib/mpi/mpi-add.c index 2cdae54c1bd0..9056fc5167fc 100644 --- a/lib/mpi/mpi-add.c +++ b/lib/mpi/mpi-add.c @@ -138,7 +138,7 @@ void mpi_sub(MPI w, MPI u, MPI v) mpi_add(w, u, vv); mpi_free(vv); } - +EXPORT_SYMBOL_GPL(mpi_sub); void mpi_addm(MPI w, MPI u, MPI v, MPI m) { diff --git a/lib/mpi/mpi-mul.c b/lib/mpi/mpi-mul.c index 8f5fa200f297..7f4eda8560dc 100644 --- a/lib/mpi/mpi-mul.c +++ b/lib/mpi/mpi-mul.c @@ -82,6 +82,7 @@ void mpi_mul(MPI w, MPI u, MPI v) if (tmp_limb) mpi_free_limb_space(tmp_limb); } +EXPORT_SYMBOL_GPL(mpi_mul); void mpi_mulm(MPI w, MPI u, MPI v, MPI m) { diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 199ab201d501..d01aec6ae15c 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) * Allow nested NMI backtraces while serializing * against other CPUs. */ - printk_cpu_lock_irqsave(flags); + printk_cpu_sync_get_irqsave(flags); if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) { pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n", cpu, (void *)instruction_pointer(regs)); @@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) else dump_stack(); } - printk_cpu_unlock_irqrestore(flags); + printk_cpu_sync_put_irqrestore(flags); cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); return true; } diff --git a/lib/nodemask.c b/lib/nodemask.c index 3aa454c54c0d..e22647f5181b 100644 --- a/lib/nodemask.c +++ b/lib/nodemask.c @@ -3,9 +3,9 @@ #include <linux/module.h> #include <linux/random.h> -int __next_node_in(int node, const nodemask_t *srcp) +unsigned int __next_node_in(int node, const nodemask_t *srcp) { - int ret = __next_node(node, srcp); + unsigned int ret = __next_node(node, srcp); if (ret == MAX_NUMNODES) ret = __first_node(srcp); diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 475f0c064bf6..7e3e43679b73 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -91,6 +91,7 @@ DEFINE_TEST_ARRAY(u32) = { {-4U, 5U, 1U, -9U, -20U, true, false, true}, }; +#if BITS_PER_LONG == 64 DEFINE_TEST_ARRAY(u64) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, @@ -114,6 +115,7 @@ DEFINE_TEST_ARRAY(u64) = { false, true, false}, {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, }; +#endif DEFINE_TEST_ARRAY(s8) = { {0, 0, 0, 0, 0, false, false, false}, @@ -188,6 +190,8 @@ DEFINE_TEST_ARRAY(s32) = { {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, }; + +#if BITS_PER_LONG == 64 DEFINE_TEST_ARRAY(s64) = { {0, 0, 0, 0, 0, false, false, false}, @@ -216,6 +220,7 @@ DEFINE_TEST_ARRAY(s64) = { {-128, -1, -129, -127, 128, false, false, false}, {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, }; +#endif #define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ t _r; \ @@ -650,6 +655,7 @@ static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(s16_overflow_test), KUNIT_CASE(u32_overflow_test), KUNIT_CASE(s32_overflow_test), +/* Clang 13 and earlier generate unwanted libcalls on 32-bit. 
*/ #if BITS_PER_LONG == 64 KUNIT_CASE(u64_overflow_test), KUNIT_CASE(s64_overflow_test), diff --git a/lib/sbitmap.c b/lib/sbitmap.c index ae4fd4de9ebe..29eb0484215a 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -528,7 +528,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, sbitmap_deferred_clear(map); if (map->word == (1UL << (map_depth - 1)) - 1) - continue; + goto next; nr = find_first_zero_bit(&map->word, map_depth); if (nr + nr_tags <= map_depth) { @@ -539,6 +539,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, get_mask = ((1UL << map_tags) - 1) << nr; do { val = READ_ONCE(map->word); + if ((val & ~get_mask) != val) + goto next; ret = atomic_long_cmpxchg(ptr, val, get_mask | val); } while (ret != val); get_mask = (get_mask & ~ret) >> nr; @@ -549,6 +551,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, return get_mask; } } +next: /* Jump to next index. */ if (++index >= sb->map_nr) index = 0; diff --git a/lib/siphash.c b/lib/siphash.c index 71d315a6ad62..15bc5b6f368c 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -1,6 +1,5 @@ -/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - * - * This file is provided under a dual BSD/GPLv2 license. +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * * SipHash: a fast short-input PRF * https://131002.net/siphash/ diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c index 8662dc6cb509..7a0564d7cb7a 100644 --- a/lib/slub_kunit.c +++ b/lib/slub_kunit.c @@ -12,7 +12,7 @@ static int slab_errors; static void test_clobber_zone(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0, - SLAB_RED_ZONE, NULL); + SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kasan_disable_current(); @@ -30,7 +30,7 @@ static void test_clobber_zone(struct kunit *test) static void test_next_pointer(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0, - SLAB_POISON, NULL); + SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); unsigned long tmp; unsigned long *ptr_addr; @@ -75,7 +75,7 @@ static void test_next_pointer(struct kunit *test) static void test_first_word(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0, - SLAB_POISON, NULL); + SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kmem_cache_free(s, p); @@ -90,7 +90,7 @@ static void test_first_word(struct kunit *test) static void test_clobber_50th_byte(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0, - SLAB_POISON, NULL); + SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kmem_cache_free(s, p); @@ -106,7 +106,7 @@ static void test_clobber_50th_byte(struct kunit *test) static void test_clobber_redzone_free(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0, - SLAB_RED_ZONE, NULL); + SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kasan_disable_current(); diff --git a/lib/stackdepot.c b/lib/stackdepot.c index bf5ba9af0500..5ca0d086ef4a 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -66,6 +66,9 @@ struct stack_record { unsigned long entries[]; /* Variable-sized array of entries. 
*/ }; +static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); +static bool __stack_depot_early_init_passed __initdata; + static void *stack_slabs[STACK_ALLOC_MAX_SLABS]; static int depot_index; @@ -162,38 +165,58 @@ static int __init is_stack_depot_disabled(char *str) } early_param("stack_depot_disable", is_stack_depot_disabled); -/* - * __ref because of memblock_alloc(), which will not be actually called after - * the __init code is gone, because at that point slab_is_available() is true - */ -__ref int stack_depot_init(void) +void __init stack_depot_want_early_init(void) +{ + /* Too late to request early init now */ + WARN_ON(__stack_depot_early_init_passed); + + __stack_depot_want_early_init = true; +} + +int __init stack_depot_early_init(void) +{ + size_t size; + + /* This is supposed to be called only once, from mm_init() */ + if (WARN_ON(__stack_depot_early_init_passed)) + return 0; + + __stack_depot_early_init_passed = true; + + if (!__stack_depot_want_early_init || stack_depot_disable) + return 0; + + size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); + pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n", + size); + stack_table = memblock_alloc(size, SMP_CACHE_BYTES); + + if (!stack_table) { + pr_err("Stack Depot hash table allocation failed, disabling\n"); + stack_depot_disable = true; + return -ENOMEM; + } + + return 0; +} + +int stack_depot_init(void) { static DEFINE_MUTEX(stack_depot_init_mutex); + int ret = 0; mutex_lock(&stack_depot_init_mutex); if (!stack_depot_disable && !stack_table) { - size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); - int i; - - if (slab_is_available()) { - pr_info("Stack Depot allocating hash table with kvmalloc\n"); - stack_table = kvmalloc(size, GFP_KERNEL); - } else { - pr_info("Stack Depot allocating hash table with memblock_alloc\n"); - stack_table = memblock_alloc(size, SMP_CACHE_BYTES); - } - if (stack_table) { - for (i = 0; i < STACK_HASH_SIZE; i++) - stack_table[i] = NULL; - } else { + pr_info("Stack Depot allocating hash table with kvcalloc\n"); + stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL); + if (!stack_table) { pr_err("Stack Depot hash table allocation failed, disabling\n"); stack_depot_disable = true; - mutex_unlock(&stack_depot_init_mutex); - return -ENOMEM; + ret = -ENOMEM; } } mutex_unlock(&stack_depot_init_mutex); - return 0; + return ret; } EXPORT_SYMBOL_GPL(stack_depot_init); diff --git a/lib/string.c b/lib/string.c index 485777c9da83..6f334420f687 100644 --- a/lib/string.c +++ b/lib/string.c @@ -517,21 +517,13 @@ EXPORT_SYMBOL(strnlen); size_t strspn(const char *s, const char *accept) { const char *p; - const char *a; - size_t count = 0; for (p = s; *p != '\0'; ++p) { - for (a = accept; *a != '\0'; ++a) { - if (*p == *a) - break; - } - if (*a == '\0') - return count; - ++count; + if (!strchr(accept, *p)) + break; } - return count; + return p - s; } - EXPORT_SYMBOL(strspn); #endif @@ -544,17 +536,12 @@ EXPORT_SYMBOL(strspn); size_t strcspn(const char *s, const char *reject) { const char *p; - const char *r; - size_t count = 0; for (p = s; *p != '\0'; ++p) { - for (r = reject; *r != '\0'; ++r) { - if (*p == *r) - return count; - } - ++count; + if (strchr(reject, *p)) + break; } - return count; + return p - s; } EXPORT_SYMBOL(strcspn); #endif diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 4f877e9551d5..5ed3beb066e6 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ 
-757,6 +757,9 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n) return ERR_PTR(-ENOMEM); } + ptr->n = n; + devres_add(dev, ptr); + return ptr->array; } EXPORT_SYMBOL_GPL(devm_kasprintf_strarray); diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 0c82f07f74fc..d5923a640457 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -585,6 +585,30 @@ static void __init test_bitmap_arr32(void) } } +static void __init test_bitmap_arr64(void) +{ + unsigned int nbits, next_bit; + u64 arr[EXP1_IN_BITS / 64]; + DECLARE_BITMAP(bmap2, EXP1_IN_BITS); + + memset(arr, 0xa5, sizeof(arr)); + + for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) { + memset(bmap2, 0xff, sizeof(arr)); + bitmap_to_arr64(arr, exp1, nbits); + bitmap_from_arr64(bmap2, arr, nbits); + expect_eq_bitmap(bmap2, exp1, nbits); + + next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits); + if (next_bit < round_up(nbits, BITS_PER_LONG)) + pr_err("bitmap_copy_arr64(nbits == %d:" + " tail is not safely cleared: %d\n", nbits, next_bit); + + if (nbits < EXP1_IN_BITS - 64) + expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5); + } +} + static void noinline __init test_mem_optimisations(void) { DECLARE_BITMAP(bmap1, 1024); @@ -852,6 +876,7 @@ static void __init selftest(void) test_copy(); test_replace(); test_bitmap_arr32(); + test_bitmap_arr64(); test_bitmap_parse(); test_bitmap_parselist(); test_bitmap_printlist(); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 0c5cb2d6436a..5820704165a6 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -53,6 +53,7 @@ #define FLAG_EXPECTED_FAIL BIT(1) #define FLAG_SKB_FRAG BIT(2) #define FLAG_VERIFIER_ZEXT BIT(3) +#define FLAG_LARGE_MEM BIT(4) enum { CLASSIC = BIT(6), /* Old BPF instructions only. */ @@ -7838,7 +7839,7 @@ static struct bpf_test tests[] = { }, /* BPF_LDX_MEM B/H/W/DW */ { - "BPF_LDX_MEM | BPF_B", + "BPF_LDX_MEM | BPF_B, base", .u.insns_int = { BPF_LD_IMM64(R1, 0x0102030405060708ULL), BPF_LD_IMM64(R2, 0x0000000000000008ULL), @@ -7878,7 +7879,56 @@ static struct bpf_test tests[] = { .stack_depth = 8, }, { - "BPF_LDX_MEM | BPF_H", + "BPF_LDX_MEM | BPF_B, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000000088ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_B, R1, R2, -256), + BPF_LDX_MEM(BPF_B, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_B, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000000088ULL), + BPF_STX_MEM(BPF_B, R1, R2, 256), + BPF_LDX_MEM(BPF_B, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_B, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000000088ULL), + BPF_STX_MEM(BPF_B, R1, R2, 4096), + BPF_LDX_MEM(BPF_B, R0, R1, 4096), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 4096 + 16, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, base", .u.insns_int = { BPF_LD_IMM64(R1, 0x0102030405060708ULL), BPF_LD_IMM64(R2, 0x0000000000000708ULL), @@ -7918,7 +7968,72 @@ static struct bpf_test tests[] = { 
.stack_depth = 8, }, { - "BPF_LDX_MEM | BPF_W", + "BPF_LDX_MEM | BPF_H, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_H, R1, R2, -256), + BPF_LDX_MEM(BPF_H, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_STX_MEM(BPF_H, R1, R2, 256), + BPF_LDX_MEM(BPF_H, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_STX_MEM(BPF_H, R1, R2, 8192), + BPF_LDX_MEM(BPF_H, R0, R1, 8192), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 8192 + 16, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, unaligned positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_STX_MEM(BPF_H, R1, R2, 13), + BPF_LDX_MEM(BPF_H, R0, R1, 13), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, base", .u.insns_int = { BPF_LD_IMM64(R1, 0x0102030405060708ULL), BPF_LD_IMM64(R2, 0x0000000005060708ULL), @@ -7957,6 +8072,162 @@ static struct bpf_test tests[] = { { { 0, 0 } }, .stack_depth = 8, }, + { + "BPF_LDX_MEM | BPF_W, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_W, R1, R2, -256), + BPF_LDX_MEM(BPF_W, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_STX_MEM(BPF_W, R1, R2, 256), + BPF_LDX_MEM(BPF_W, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_STX_MEM(BPF_W, R1, R2, 16384), + BPF_LDX_MEM(BPF_W, R0, R1, 16384), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 16384 + 16, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, unaligned positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_STX_MEM(BPF_W, R1, R2, 13), + BPF_LDX_MEM(BPF_W, R0, R1, 13), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32, 0 } }, + .stack_depth = 0, + }, + { + 
"BPF_LDX_MEM | BPF_DW, base", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x0102030405060708ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + .stack_depth = 8, + }, + { + "BPF_LDX_MEM | BPF_DW, MSB set", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + .stack_depth = 8, + }, + { + "BPF_LDX_MEM | BPF_DW, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_DW, R1, R2, -256), + BPF_LDX_MEM(BPF_DW, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_DW, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R1, R2, 256), + BPF_LDX_MEM(BPF_DW, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 8, + }, + { + "BPF_LDX_MEM | BPF_DW, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R1, R2, 32760), + BPF_LDX_MEM(BPF_DW, R0, R1, 32760), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32768, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_DW, unaligned positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R1, R2, 13), + BPF_LDX_MEM(BPF_DW, R0, R1, 13), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32, 0 } }, + .stack_depth = 0, + }, /* BPF_STX_MEM B/H/W/DW */ { "BPF_STX_MEM | BPF_B", @@ -14094,6 +14365,9 @@ static void *generate_test_data(struct bpf_test *test, int sub) if (test->aux & FLAG_NO_DATA) return NULL; + if (test->aux & FLAG_LARGE_MEM) + return kmalloc(test->test[sub].data_size, GFP_KERNEL); + /* Test case expects an skb, so populate one. Various * subtests generate skbs of different sizes based on * the same data. 
@@ -14137,7 +14411,10 @@ static void release_test_data(const struct bpf_test *test, void *data) if (test->aux & FLAG_NO_DATA) return; - kfree_skb(data); + if (test->aux & FLAG_LARGE_MEM) + kfree(data); + else + kfree_skb(data); } static int filter_length(int which) @@ -14456,9 +14733,9 @@ static struct skb_segment_test skb_segment_tests[] __initconst = { .build_skb = build_test_skb_linear_no_head_frag, .features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO | - NETIF_F_LLTX_BIT | NETIF_F_GRO | + NETIF_F_LLTX | NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_STAG_TX_BIT + NETIF_F_HW_VLAN_STAG_TX } }; @@ -14674,6 +14951,36 @@ static struct tail_call_test tail_call_tests[] = { .result = 10, }, { + "Tail call load/store leaf", + .insns = { + BPF_ALU64_IMM(BPF_MOV, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R2, 2), + BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP), + BPF_STX_MEM(BPF_DW, R3, R1, -8), + BPF_STX_MEM(BPF_DW, R3, R2, -16), + BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8), + BPF_JMP_REG(BPF_JNE, R0, R1, 3), + BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + .result = 0, + .stack_depth = 32, + }, + { + "Tail call load/store", + .insns = { + BPF_ALU64_IMM(BPF_MOV, R0, 3), + BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8), + TAIL_CALL(-1), + BPF_ALU64_IMM(BPF_MOV, R0, -1), + BPF_EXIT_INSN(), + }, + .result = 0, + .stack_depth = 16, + }, + { "Tail call error path, max count reached", .insns = { BPF_LDX_MEM(BPF_W, R2, R1, 0), diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 1bccd6cd5f48..c82b65947ce6 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -31,9 +31,12 @@ MODULE_IMPORT_NS(TEST_FIRMWARE); #define TEST_FIRMWARE_NAME "test-firmware.bin" #define TEST_FIRMWARE_NUM_REQS 4 #define TEST_FIRMWARE_BUF_SIZE SZ_1K +#define TEST_UPLOAD_MAX_SIZE SZ_2K +#define TEST_UPLOAD_BLK_SIZE 37 /* Avoid powers of two in testing */ static DEFINE_MUTEX(test_fw_mutex); static const struct firmware *test_firmware; +static LIST_HEAD(test_upload_list); struct test_batched_req { u8 idx; @@ -63,6 +66,7 @@ struct test_batched_req { * @reqs: stores all requests information * @read_fw_idx: index of thread from which we want to read firmware results * from through the read_fw trigger. + * @upload_name: firmware name to be used with upload_read sysfs node * @test_result: a test may use this to collect the result from the call * of the request_firmware*() calls used in their tests. In order of * priority we always keep first any setup error. 
If no setup errors were @@ -101,6 +105,7 @@ struct test_config { bool send_uevent; u8 num_requests; u8 read_fw_idx; + char *upload_name; /* * These below don't belong her but we'll move them once we create @@ -112,8 +117,34 @@ struct test_config { struct device *device); }; +struct upload_inject_err { + const char *prog; + enum fw_upload_err err_code; +}; + +struct test_firmware_upload { + char *name; + struct list_head node; + char *buf; + size_t size; + bool cancel_request; + struct upload_inject_err inject; + struct fw_upload *fwl; +}; + static struct test_config *test_fw_config; +static struct test_firmware_upload *upload_lookup_name(const char *name) +{ + struct test_firmware_upload *tst; + + list_for_each_entry(tst, &test_upload_list, node) + if (strncmp(name, tst->name, strlen(tst->name)) == 0) + return tst; + + return NULL; +} + static ssize_t test_fw_misc_read(struct file *f, char __user *buf, size_t size, loff_t *offset) { @@ -198,6 +229,7 @@ static int __test_firmware_config_init(void) test_fw_config->req_firmware = request_firmware; test_fw_config->test_result = 0; test_fw_config->reqs = NULL; + test_fw_config->upload_name = NULL; return 0; @@ -277,6 +309,13 @@ static ssize_t config_show(struct device *dev, test_fw_config->sync_direct ? "true" : "false"); len += scnprintf(buf + len, PAGE_SIZE - len, "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx); + if (test_fw_config->upload_name) + len += scnprintf(buf + len, PAGE_SIZE - len, + "upload_name:\t%s\n", + test_fw_config->upload_name); + else + len += scnprintf(buf + len, PAGE_SIZE - len, + "upload_name:\tEMTPY\n"); mutex_unlock(&test_fw_mutex); @@ -392,6 +431,32 @@ static ssize_t config_name_show(struct device *dev, } static DEVICE_ATTR_RW(config_name); +static ssize_t config_upload_name_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + int ret = count; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(buf); + if (tst) + test_fw_config->upload_name = tst->name; + else + ret = -EINVAL; + mutex_unlock(&test_fw_mutex); + + return ret; +} + +static ssize_t config_upload_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return config_test_show_str(buf, test_fw_config->upload_name); +} +static DEVICE_ATTR_RW(config_upload_name); + static ssize_t config_num_requests_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -989,6 +1054,278 @@ out: } static DEVICE_ATTR_WO(trigger_batched_requests_async); +static void upload_release(struct test_firmware_upload *tst) +{ + firmware_upload_unregister(tst->fwl); + kfree(tst->buf); + kfree(tst->name); + kfree(tst); +} + +static void upload_release_all(void) +{ + struct test_firmware_upload *tst, *tmp; + + list_for_each_entry_safe(tst, tmp, &test_upload_list, node) { + list_del(&tst->node); + upload_release(tst); + } + test_fw_config->upload_name = NULL; +} + +/* + * This table is replicated from .../firmware_loader/sysfs_upload.c + * and needs to be kept in sync. 
+ */ +static const char * const fw_upload_err_str[] = { + [FW_UPLOAD_ERR_NONE] = "none", + [FW_UPLOAD_ERR_HW_ERROR] = "hw-error", + [FW_UPLOAD_ERR_TIMEOUT] = "timeout", + [FW_UPLOAD_ERR_CANCELED] = "user-abort", + [FW_UPLOAD_ERR_BUSY] = "device-busy", + [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size", + [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error", + [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout", +}; + +static void upload_err_inject_error(struct test_firmware_upload *tst, + const u8 *p, const char *prog) +{ + enum fw_upload_err err; + + for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) { + if (strncmp(p, fw_upload_err_str[err], + strlen(fw_upload_err_str[err])) == 0) { + tst->inject.prog = prog; + tst->inject.err_code = err; + return; + } + } +} + +static void upload_err_inject_prog(struct test_firmware_upload *tst, + const u8 *p) +{ + static const char * const progs[] = { + "preparing:", "transferring:", "programming:" + }; + int i; + + for (i = 0; i < ARRAY_SIZE(progs); i++) { + if (strncmp(p, progs[i], strlen(progs[i])) == 0) { + upload_err_inject_error(tst, p + strlen(progs[i]), + progs[i]); + return; + } + } +} + +#define FIVE_MINUTES_MS (5 * 60 * 1000) +static enum fw_upload_err +fw_upload_wait_on_cancel(struct test_firmware_upload *tst) +{ + int ms_delay; + + for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) { + msleep(100); + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + } + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl, + const u8 *data, u32 size) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + enum fw_upload_err ret = FW_UPLOAD_ERR_NONE; + const char *progress = "preparing:"; + + tst->cancel_request = false; + + if (!size || size > TEST_UPLOAD_MAX_SIZE) { + ret = FW_UPLOAD_ERR_INVALID_SIZE; + goto err_out; + } + + if (strncmp(data, "inject:", strlen("inject:")) == 0) + upload_err_inject_prog(tst, data + strlen("inject:")); + + memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE); + tst->size = size; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + ret = fw_upload_wait_on_cancel(tst); + else + ret = tst->inject.err_code; + +err_out: + /* + * The cleanup op only executes if the prepare op succeeds. + * If the prepare op fails, it must do it's own clean-up. 
+ */ + tst->inject.err_code = FW_UPLOAD_ERR_NONE; + tst->inject.prog = NULL; + + return ret; +} + +static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl, + const u8 *data, u32 offset, + u32 size, u32 *written) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + const char *progress = "transferring:"; + u32 blk_size; + + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size); + memcpy(tst->buf + offset, data + offset, blk_size); + + *written = blk_size; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + return fw_upload_wait_on_cancel(tst); + + return tst->inject.err_code; +} + +static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + const char *progress = "programming:"; + + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + return fw_upload_wait_on_cancel(tst); + + return tst->inject.err_code; +} + +static void test_fw_upload_cancel(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + + tst->cancel_request = true; +} + +static void test_fw_cleanup(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + + tst->inject.err_code = FW_UPLOAD_ERR_NONE; + tst->inject.prog = NULL; +} + +static const struct fw_upload_ops upload_test_ops = { + .prepare = test_fw_upload_prepare, + .write = test_fw_upload_write, + .poll_complete = test_fw_upload_complete, + .cancel = test_fw_upload_cancel, + .cleanup = test_fw_cleanup +}; + +static ssize_t upload_register_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + struct fw_upload *fwl; + char *name; + int ret; + + name = kstrndup(buf, count, GFP_KERNEL); + if (!name) + return -ENOMEM; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(name); + if (tst) { + ret = -EEXIST; + goto free_name; + } + + tst = kzalloc(sizeof(*tst), GFP_KERNEL); + if (!tst) { + ret = -ENOMEM; + goto free_name; + } + + tst->name = name; + tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL); + if (!tst->buf) { + ret = -ENOMEM; + goto free_tst; + } + + fwl = firmware_upload_register(THIS_MODULE, dev, tst->name, + &upload_test_ops, tst); + if (IS_ERR(fwl)) { + ret = PTR_ERR(fwl); + goto free_buf; + } + + tst->fwl = fwl; + list_add_tail(&tst->node, &test_upload_list); + mutex_unlock(&test_fw_mutex); + return count; + +free_buf: + kfree(tst->buf); + +free_tst: + kfree(tst); + +free_name: + mutex_unlock(&test_fw_mutex); + kfree(name); + + return ret; +} +static DEVICE_ATTR_WO(upload_register); + +static ssize_t upload_unregister_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + int ret = count; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(buf); + if (!tst) { + ret = -EINVAL; + goto out; + } + + if (test_fw_config->upload_name == tst->name) + test_fw_config->upload_name = NULL; + + list_del(&tst->node); + upload_release(tst); + +out: + mutex_unlock(&test_fw_mutex); + return ret; +} +static DEVICE_ATTR_WO(upload_unregister); + static ssize_t 
test_result_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1051,6 +1388,45 @@ out: } static DEVICE_ATTR_RO(read_firmware); +static ssize_t upload_read_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct test_firmware_upload *tst = NULL; + struct test_firmware_upload *tst_iter; + int ret = -EINVAL; + + if (!test_fw_config->upload_name) { + pr_err("Set config_upload_name before using upload_read\n"); + return -EINVAL; + } + + mutex_lock(&test_fw_mutex); + list_for_each_entry(tst_iter, &test_upload_list, node) + if (tst_iter->name == test_fw_config->upload_name) { + tst = tst_iter; + break; + } + + if (!tst) { + pr_err("Firmware name not found: %s\n", + test_fw_config->upload_name); + goto out; + } + + if (tst->size > PAGE_SIZE) { + pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); + goto out; + } + + memcpy(buf, tst->buf, tst->size); + ret = tst->size; +out: + mutex_unlock(&test_fw_mutex); + return ret; +} +static DEVICE_ATTR_RO(upload_read); + #define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr static struct attribute *test_dev_attrs[] = { @@ -1066,6 +1442,7 @@ static struct attribute *test_dev_attrs[] = { TEST_FW_DEV_ATTR(config_sync_direct), TEST_FW_DEV_ATTR(config_send_uevent), TEST_FW_DEV_ATTR(config_read_fw_idx), + TEST_FW_DEV_ATTR(config_upload_name), /* These don't use the config at all - they could be ported! */ TEST_FW_DEV_ATTR(trigger_request), @@ -1082,6 +1459,9 @@ static struct attribute *test_dev_attrs[] = { TEST_FW_DEV_ATTR(release_all_firmware), TEST_FW_DEV_ATTR(test_result), TEST_FW_DEV_ATTR(read_firmware), + TEST_FW_DEV_ATTR(upload_read), + TEST_FW_DEV_ATTR(upload_register), + TEST_FW_DEV_ATTR(upload_unregister), NULL, }; @@ -1128,6 +1508,7 @@ static void __exit test_firmware_exit(void) mutex_lock(&test_fw_mutex); release_firmware(test_firmware); misc_deregister(&test_fw_misc_device); + upload_release_all(); __test_firmware_config_free(); kfree(test_fw_config); mutex_unlock(&test_fw_mutex); diff --git a/lib/test_kasan.c b/lib/test_kasan.c index ad880231dfa8..58c1b01ccfe2 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -131,6 +131,7 @@ static void kmalloc_oob_right(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); /* * An unaligned access past the requested kmalloc size. * Only generic KASAN can precisely detect these. 
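The OPTIMIZER_HIDE_VAR(ptr) calls added throughout these KASAN tests make the pointer opaque to the compiler, so the deliberately out-of-bounds accesses that follow are actually emitted rather than folded away or rejected at build time. A minimal userspace sketch of the idiom, assuming the macro mirrors the usual asm-based definition in include/linux/compiler.h (the in-bounds write here is only to keep the sample well-defined; the tests index one past the allocation so KASAN reports it):

#include <stdlib.h>

/* Assumption: same shape as the kernel macro: an empty asm statement that
 * forces the value through a register, so the optimizer forgets everything
 * it had inferred about the variable. */
#define OPTIMIZER_HIDE_VAR(var)	__asm__("" : "=r" (var) : "0" (var))

int main(void)
{
	size_t size = 128;
	char *ptr = malloc(size);

	if (!ptr)
		return 1;
	OPTIMIZER_HIDE_VAR(ptr);	/* ptr's origin is now opaque to the optimizer */
	ptr[size - 1] = 'x';		/* emitted as written, not proven invalid and elided */
	free(ptr);
	return 0;
}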
@@ -159,6 +160,7 @@ static void kmalloc_oob_left(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); kfree(ptr); } @@ -171,6 +173,7 @@ static void kmalloc_node_oob_right(struct kunit *test) ptr = kmalloc_node(size, GFP_KERNEL, 0); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); kfree(ptr); } @@ -191,6 +194,7 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0); kfree(ptr); @@ -271,6 +275,7 @@ static void kmalloc_large_oob_right(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); kfree(ptr); } @@ -391,7 +396,7 @@ static void krealloc_uaf(struct kunit *test) kfree(ptr1); KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL)); - KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL); + KUNIT_ASSERT_NULL(test, ptr2); KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1); } @@ -410,6 +415,8 @@ static void kmalloc_oob_16(struct kunit *test) ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); + OPTIMIZER_HIDE_VAR(ptr1); + OPTIMIZER_HIDE_VAR(ptr2); KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); kfree(ptr1); kfree(ptr2); @@ -756,6 +763,8 @@ static void ksize_unpoisons_memory(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); real_size = ksize(ptr); + OPTIMIZER_HIDE_VAR(ptr); + /* This access shouldn't trigger a KASAN report. */ ptr[size] = 'x'; @@ -778,6 +787,7 @@ static void ksize_uaf(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); kfree(ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr)); KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); diff --git a/lib/test_meminit.c b/lib/test_meminit.c index 3ca717f11397..c95db11a6906 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -279,13 +279,18 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures) c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU, NULL); buf = kmem_cache_alloc(c, GFP_KERNEL); + if (!buf) + goto out; saved_ptr = buf; fill_with_garbage(buf, size); buf_contents = kmalloc(size, GFP_KERNEL); - if (!buf_contents) + if (!buf_contents) { + kmem_cache_free(c, buf); goto out; + } used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL); if (!used_objects) { + kmem_cache_free(c, buf); kfree(buf_contents); goto out; } @@ -306,11 +311,14 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures) } } + for (iter = 0; iter < maxiter; iter++) + kmem_cache_free(c, used_objects[iter]); + free_out: - kmem_cache_destroy(c); kfree(buf_contents); kfree(used_objects); out: + kmem_cache_destroy(c); *total_failures += fail; return 1; } diff --git a/lib/test_siphash.c b/lib/test_siphash.c index a6d854d933bf..a96788d0141d 100644 --- a/lib/test_siphash.c +++ b/lib/test_siphash.c @@ -1,8 +1,7 @@ -/* Test cases for siphash.c +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * - * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
- * - * This file is provided under a dual BSD/GPLv2 license. + * Test cases for siphash.c * * SipHash: a fast short-input PRF * https://131002.net/siphash/ diff --git a/lib/test_string.c b/lib/test_string.c index 9dfd6f52de92..c5cb92fb710e 100644 --- a/lib/test_string.c +++ b/lib/test_string.c @@ -179,6 +179,34 @@ static __init int strnchr_selftest(void) return 0; } +static __init int strspn_selftest(void) +{ + static const struct strspn_test { + const char str[16]; + const char accept[16]; + const char reject[16]; + unsigned a; + unsigned r; + } tests[] __initconst = { + { "foobar", "", "", 0, 6 }, + { "abba", "abc", "ABBA", 4, 4 }, + { "abba", "a", "b", 1, 1 }, + { "", "abc", "abc", 0, 0}, + }; + const struct strspn_test *s = tests; + size_t i, res; + + for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) { + res = strspn(s->str, s->accept); + if (res != s->a) + return 0x100 + 2*i; + res = strcspn(s->str, s->reject); + if (res != s->r) + return 0x100 + 2*i + 1; + } + return 0; +} + static __exit void string_selftest_remove(void) { } @@ -212,6 +240,11 @@ static __init int string_selftest_init(void) if (subtest) goto fail; + test = 6; + subtest = strspn_selftest(); + if (subtest) + goto fail; + pr_info("String selftests succeeded\n"); return 0; fail: diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index a5a3d6c27e1f..9a564971f539 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -38,6 +38,7 @@ static int i_zero; static int i_one_hundred = 100; +static int match_int_ok = 1; struct test_sysctl_data { int int_0001; @@ -96,6 +97,13 @@ static struct ctl_table test_table[] = { .proc_handler = proc_dointvec, }, { + .procname = "match_int", + .data = &match_int_ok, + .maxlen = sizeof(match_int_ok), + .mode = 0444, + .proc_handler = proc_dointvec, + }, + { .procname = "boot_int", .data = &test_data.boot_int, .maxlen = sizeof(test_data.boot_int), @@ -132,6 +140,30 @@ static struct ctl_table_header *test_sysctl_header; static int __init test_sysctl_init(void) { + int i; + + struct { + int defined; + int wanted; + } match_int[] = { + {.defined = *(int *)SYSCTL_ZERO, .wanted = 0}, + {.defined = *(int *)SYSCTL_ONE, .wanted = 1}, + {.defined = *(int *)SYSCTL_TWO, .wanted = 2}, + {.defined = *(int *)SYSCTL_THREE, .wanted = 3}, + {.defined = *(int *)SYSCTL_FOUR, .wanted = 4}, + {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100}, + {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200}, + {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000}, + {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000}, + {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX}, + {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535}, + {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1}, + }; + + for (i = 0; i < ARRAY_SIZE(match_int); i++) + if (match_int[i].defined != match_int[i].wanted) + match_int_ok = 0; + test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL); if (!test_data.bitmap_0001) return -ENOMEM; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index fb77f7bfd126..3c1853a9d1c0 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -769,8 +769,7 @@ static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out) static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); unsigned long flags; - if (!system_unbound_wq || - (!rng_is_initialized() && !rng_has_arch_random()) || + if (!system_unbound_wq || !rng_is_initialized() || !spin_trylock_irqsave(&filling, flags)) return -EAGAIN; diff --git a/lib/xarray.c b/lib/xarray.c index 54e646e8e6ee..ea9ce1f0b386 100644 --- 
a/lib/xarray.c +++ b/lib/xarray.c @@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node) * xas_destroy() - Free any resources allocated during the XArray operation. * @xas: XArray operation state. * - * This function is now internal-only. + * Most users will not need to call this function; it is called for you + * by xas_nomem(). */ -static void xas_destroy(struct xa_state *xas) +void xas_destroy(struct xa_state *xas) { struct xa_node *next, *node = xas->xa_alloc;
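For context on the xarray.c change above: xas_destroy() releases any node that xas_nomem() preallocated, and the updated kernel-doc points callers at the usual retry loop in which xas_nomem() takes care of that for them. A short sketch of that loop, following the pattern from the XArray documentation (store_item() is an illustrative name, not part of this patch):

#include <linux/xarray.h>

static int store_item(struct xarray *xa, unsigned long index, void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* on ENOMEM: allocate and retry; otherwise it calls xas_destroy() */

	return xas_error(&xas);
}

A caller that gives up after xas_nomem() has preallocated memory, instead of retrying, would be the case where calling xas_destroy() directly becomes relevant.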