Diffstat (limited to 'lib')
63 files changed, 1786 insertions, 1126 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 5ddda7c2ed9b..d33a268bc256 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -70,9 +70,6 @@ source "lib/math/Kconfig" config NO_GENERIC_PCI_IOPORT_MAP bool -config GENERIC_PCI_IOMAP - bool - config GENERIC_IOMAP bool select GENERIC_PCI_IOMAP @@ -631,7 +628,7 @@ config SIGNATURE Implementation is done using GnuPG MPI library config DIMLIB - bool + tristate help Dynamic Interrupt Moderation library. Implements an algorithm for dynamically changing CQ moderation values diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2164f066e7b6..8bba448c819b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -375,7 +375,7 @@ config DEBUG_INFO_SPLIT Incompatible with older versions of ccache. config DEBUG_INFO_BTF - bool "Generate BTF typeinfo" + bool "Generate BTF type information" depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST depends on BPF_SYSCALL @@ -408,7 +408,8 @@ config PAHOLE_HAS_LANG_EXCLUDE using DEBUG_INFO_BTF_MODULES. config DEBUG_INFO_BTF_MODULES - def_bool y + bool "Generate BTF type information for kernel modules" + default y depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF help Generate compact split BTF type information for kernel modules. @@ -1029,6 +1030,20 @@ config SOFTLOCKUP_DETECTOR chance to run. The current stack trace is displayed upon detection and the system will stay locked up. +config SOFTLOCKUP_DETECTOR_INTR_STORM + bool "Detect Interrupt Storm in Soft Lockups" + depends on SOFTLOCKUP_DETECTOR && IRQ_TIME_ACCOUNTING + select GENERIC_IRQ_STAT_SNAPSHOT + default y if NR_CPUS <= 128 + help + Say Y here to enable the kernel to detect interrupt storm + during "soft lockups". + + "soft lockups" can be caused by a variety of reasons. If one is + caused by an interrupt storm, then the storming interrupts will not + be on the callstack. To detect this case, it is necessary to report + the CPU stats and the interrupt counts during the "soft lockups". + config BOOTPARAM_SOFTLOCKUP_PANIC bool "Panic (Reboot) On Soft Lockups" depends on SOFTLOCKUP_DETECTOR @@ -1250,7 +1265,7 @@ config SCHED_INFO config SCHEDSTATS bool "Collect scheduler statistics" - depends on DEBUG_KERNEL && PROC_FS + depends on PROC_FS select SCHED_INFO help If you say Y here, additional code will be inserted into the @@ -2085,7 +2100,7 @@ config KCOV depends on ARCH_HAS_KCOV depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \ - GCC_VERSION >= 120000 || CLANG_VERSION >= 130000 + GCC_VERSION >= 120000 || CC_IS_CLANG select DEBUG_FS select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC select OBJTOOL if HAVE_NOINSTR_HACK @@ -2127,7 +2142,7 @@ config KCOV_IRQ_AREA_SIZE menuconfig RUNTIME_TESTING_MENU bool "Runtime Testing" - def_bool y + default y if RUNTIME_TESTING_MENU @@ -2142,7 +2157,7 @@ config TEST_DHRY To run the benchmark, it needs to be enabled explicitly, either from the kernel command line (when built-in), or from userspace (when - built-in or modular. + built-in or modular). Run once during kernel boot: @@ -2703,18 +2718,6 @@ config MEMCPY_KUNIT_TEST If unsure, say N. -config MEMCPY_SLOW_KUNIT_TEST - bool "Include exhaustive memcpy tests" - depends on MEMCPY_KUNIT_TEST - default y - help - Some memcpy tests are quite exhaustive in checking for overlaps - and bit ranges. These can be very slow, so they are split out - as a separate config, in case they need to be disabled. 
- - Note this config option will be replaced by the use of KUnit test - attributes. - config IS_SIGNED_TYPE_KUNIT_TEST tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS depends on KUNIT @@ -2770,16 +2773,6 @@ config HW_BREAKPOINT_KUNIT_TEST If unsure, say N. -config STRCAT_KUNIT_TEST - tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT - default KUNIT_ALL_TESTS - -config STRSCPY_KUNIT_TEST - tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT - default KUNIT_ALL_TESTS - config SIPHASH_KUNIT_TEST tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index e6eda054ab27..98016e137b7f 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -158,7 +158,7 @@ config KASAN_STACK out-of-bounds bugs in stack variables. With Clang, stack instrumentation has a problem that causes excessive - stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus, + stack usage, see https://llvm.org/pr38809. Thus, with Clang, this option is deemed unsafe. This option is always disabled when compile-testing with Clang to diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 3b9a44008433..b5c0e6576749 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -43,7 +43,7 @@ config KGDB_SERIAL_CONSOLE tristate "KGDB: use kgdb over the serial console" select CONSOLE_POLL select MAGIC_SYSRQ - depends on TTY && HW_CONSOLE + depends on TTY && VT default y help Share a serial console with kgdb. Sysrq-g must be used diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 48a67058f84e..e81e1ac4a919 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -119,6 +119,8 @@ config UBSAN_SIGNED_WRAP bool "Perform checking for signed arithmetic wrap-around" default UBSAN depends on !COMPILE_TEST + # The no_sanitize attribute was introduced in GCC with version 8. 
+ depends on !CC_IS_GCC || GCC_VERSION >= 80000 depends on $(cc-option,-fsanitize=signed-integer-overflow) help This option enables -fsanitize=signed-integer-overflow which checks diff --git a/lib/Makefile b/lib/Makefile index 363852afa200..ed8dbf4436dd 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -152,7 +152,6 @@ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) obj-y += math/ crypto/ obj-$(CONFIG_GENERIC_IOMAP) += iomap.o -obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o @@ -237,6 +236,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o +obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o #ensure exported functions have prototypes @@ -404,8 +404,6 @@ CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread) CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation) CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o -obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o -obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o diff --git a/lib/assoc_array.c b/lib/assoc_array.c index ca0b4f360c1a..388e656ac974 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -938,7 +938,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit, edit->leaf_p = &new_n0->slots[0]; pr_devel("<--%s() = ok [split shortcut]\n", __func__); - return edit; + return true; } /** diff --git a/lib/bootconfig.c b/lib/bootconfig.c index c59d26068a64..97f8911ea339 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_mem(size_t size) return memblock_alloc(size, SMP_CACHE_BYTES); } -static inline void __init xbc_free_mem(void *addr, size_t size) +static inline void __init xbc_free_mem(void *addr, size_t size, bool early) { - memblock_free(addr, size); + if (early) + memblock_free(addr, size); + else if (addr) + memblock_free_late(__pa(addr), size); } #else /* !__KERNEL__ */ @@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t size) return malloc(size); } -static inline void xbc_free_mem(void *addr, size_t size) +static inline void xbc_free_mem(void *addr, size_t size, bool early) { free(addr); } @@ -898,19 +901,20 @@ static int __init xbc_parse_tree(void) } /** - * xbc_exit() - Clean up all parsed bootconfig + * _xbc_exit() - Clean up all parsed bootconfig + * @early: Set true if this is called before the buddy system is initialized. * * This clears all data structures of parsed bootconfig in memory. * If you need to reuse xbc_init() with new boot config, you can * use this. 
*/ -void __init xbc_exit(void) +void __init _xbc_exit(bool early) { - xbc_free_mem(xbc_data, xbc_data_size); + xbc_free_mem(xbc_data, xbc_data_size, early); xbc_data = NULL; xbc_data_size = 0; xbc_node_num = 0; - xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX); + xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early); xbc_nodes = NULL; brace_index = 0; } @@ -963,7 +967,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos) if (!xbc_nodes) { if (emsg) *emsg = "Failed to allocate bootconfig nodes"; - xbc_exit(); + _xbc_exit(true); return -ENOMEM; } memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX); @@ -977,7 +981,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos) *epos = xbc_err_pos; if (emsg) *emsg = xbc_err_msg; - xbc_exit(); + _xbc_exit(true); } else ret = xbc_node_num; diff --git a/lib/buildid.c b/lib/buildid.c index e3a7acdeef0e..898301b49eb6 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -140,7 +140,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, return -EFAULT; /* page not mapped */ ret = -EINVAL; - page_addr = kmap_atomic(page); + page_addr = kmap_local_page(page); ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ @@ -156,7 +156,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ret = get_build_id_64(page_addr, build_id, size); out: - kunmap_atomic(page_addr); + kunmap_local(page_addr); put_page(page); return ret; } @@ -174,7 +174,7 @@ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size) return parse_build_id_buf(build_id, NULL, buf, buf_size); } -#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO) unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init; /** diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c index bf70850035c7..404dba36bae3 100644 --- a/lib/checksum_kunit.c +++ b/lib/checksum_kunit.c @@ -594,13 +594,15 @@ static void test_ip_fast_csum(struct kunit *test) static void test_csum_ipv6_magic(struct kunit *test) { -#if defined(CONFIG_NET) const struct in6_addr *saddr; const struct in6_addr *daddr; unsigned int len; unsigned char proto; __wsum csum; + if (!IS_ENABLED(CONFIG_NET)) + return; + const int daddr_offset = sizeof(struct in6_addr); const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr); const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) + @@ -618,7 +620,6 @@ static void test_csum_ipv6_magic(struct kunit *test) CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]), csum_ipv6_magic(saddr, daddr, len, proto, csum)); } -#endif /* !CONFIG_NET */ } static struct kunit_case __refdata checksum_test_cases[] = { diff --git a/lib/cmpxchg-emu.c b/lib/cmpxchg-emu.c new file mode 100644 index 000000000000..27f6f97cb60d --- /dev/null +++ b/lib/cmpxchg-emu.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Emulated 1-byte cmpxchg operation for architectures lacking direct + * support for this size. This is implemented in terms of 4-byte cmpxchg + * operations. + * + * Copyright (C) 2024 Paul E. McKenney. 
+ */ + +#include <linux/types.h> +#include <linux/export.h> +#include <linux/instrumented.h> +#include <linux/atomic.h> +#include <linux/panic.h> +#include <linux/bug.h> +#include <asm-generic/rwonce.h> +#include <linux/cmpxchg-emu.h> + +union u8_32 { + u8 b[4]; + u32 w; +}; + +/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */ +uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new) +{ + u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3); + int i = ((uintptr_t)p) & 0x3; + union u8_32 old32; + union u8_32 new32; + u32 ret; + + ret = READ_ONCE(*p32); + do { + old32.w = ret; + if (old32.b[i] != old) + return old32.b[i]; + new32.w = old32.w; + new32.b[i] = new; + instrument_atomic_read_write(p, 1); + ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above. + } while (ret != old32.w); + return old; +} +EXPORT_SYMBOL_GPL(cmpxchg_emu_u8); diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 45436bfc6dff..b01253cac70a 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -8,6 +8,11 @@ config CRYPTO_LIB_UTILS config CRYPTO_LIB_AES tristate +config CRYPTO_LIB_AESCFB + tristate + select CRYPTO_LIB_AES + select CRYPTO_LIB_UTILS + config CRYPTO_LIB_AESGCM tristate select CRYPTO_LIB_AES diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 8d1446c2be71..969baab8c805 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -10,6 +10,9 @@ obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o libaes-y := aes.o +obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o +libaescfb-y := aescfb.o + obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o libaesgcm-y := aesgcm.o diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c new file mode 100644 index 000000000000..749dc1258a44 --- /dev/null +++ b/lib/crypto/aescfb.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Minimal library implementation of AES in CFB mode + * + * Copyright 2023 Google LLC + */ + +#include <linux/module.h> + +#include <crypto/algapi.h> +#include <crypto/aes.h> + +#include <asm/irqflags.h> + +static void aescfb_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst, + const void *src) +{ + unsigned long flags; + + /* + * In AES-CFB, the AES encryption operates on known 'plaintext' (the IV + * and ciphertext), making it susceptible to timing attacks on the + * encryption key. The AES library already mitigates this risk to some + * extent by pulling the entire S-box into the caches before doing any + * substitutions, but this strategy is more effective when running with + * interrupts disabled. + */ + local_irq_save(flags); + aes_encrypt(ctx, dst, src); + local_irq_restore(flags); +} + +/** + * aescfb_encrypt - Perform AES-CFB encryption on a block of data + * + * @ctx: The AES-CFB key schedule + * @dst: Pointer to the ciphertext output buffer + * @src: Pointer to the plaintext (may equal @dst for encryption in place) + * @len: The size in bytes of the plaintext and ciphertext. 
+ * @iv: The initialization vector (IV) to use for this block of data + */ +void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]) +{ + u8 ks[AES_BLOCK_SIZE]; + const u8 *v = iv; + + while (len > 0) { + aescfb_encrypt_block(ctx, ks, v); + crypto_xor_cpy(dst, src, ks, min(len, AES_BLOCK_SIZE)); + v = dst; + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + len -= AES_BLOCK_SIZE; + } + + memzero_explicit(ks, sizeof(ks)); +} +EXPORT_SYMBOL(aescfb_encrypt); + +/** + * aescfb_decrypt - Perform AES-CFB decryption on a block of data + * + * @ctx: The AES-CFB key schedule + * @dst: Pointer to the plaintext output buffer + * @src: Pointer the ciphertext (may equal @dst for decryption in place) + * @len: The size in bytes of the plaintext and ciphertext. + * @iv: The initialization vector (IV) to use for this block of data + */ +void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]) +{ + u8 ks[2][AES_BLOCK_SIZE]; + + aescfb_encrypt_block(ctx, ks[0], iv); + + for (int i = 0; len > 0; i ^= 1) { + if (len > AES_BLOCK_SIZE) + /* + * Generate the keystream for the next block before + * performing the XOR, as that may update in place and + * overwrite the ciphertext. + */ + aescfb_encrypt_block(ctx, ks[!i], src); + + crypto_xor_cpy(dst, src, ks[i], min(len, AES_BLOCK_SIZE)); + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + len -= AES_BLOCK_SIZE; + } + + memzero_explicit(ks, sizeof(ks)); +} +EXPORT_SYMBOL(aescfb_decrypt); + +MODULE_DESCRIPTION("Generic AES-CFB library"); +MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); +MODULE_LICENSE("GPL"); + +#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS + +/* + * Test code below. Vectors taken from crypto/testmgr.h + */ + +static struct { + u8 ptext[64]; + u8 ctext[64]; + + u8 key[AES_MAX_KEY_SIZE]; + u8 iv[AES_BLOCK_SIZE]; + + int klen; + int len; +} const aescfb_tv[] __initconst = { + { /* From NIST SP800-38A */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f" + "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b" + "\x26\x75\x1f\x67\xa3\xcb\xb1\x40" + "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf" + "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e" + "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6", + .len = 64, + }, { + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 24, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab" + "\x34\xc2\x59\x09\xc9\x9a\x41\x74" + "\x67\xce\x7f\x7f\x81\x17\x36\x21" + "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a" + "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1" + 
"\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9" + "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0" + "\x42\xae\x8f\xba\x58\x4b\x09\xff", + .len = 64, + }, { + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 32, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b" + "\x7e\xcd\x84\x86\x98\x5d\x38\x60" + "\x39\xff\xed\x14\x3b\x28\xb1\xc8" + "\x32\x11\x3c\x63\x31\xe5\x40\x7b" + "\xdf\x10\x13\x24\x15\xe5\x4b\x92" + "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9" + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" + "\x20\x31\x62\x3d\x55\xb1\xe4\x71", + .len = 64, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, + }, +}; + +static int __init libaescfb_init(void) +{ + for (int i = 0; i < ARRAY_SIZE(aescfb_tv); i++) { + struct crypto_aes_ctx ctx; + u8 buf[64]; + + if (aes_expandkey(&ctx, aescfb_tv[i].key, aescfb_tv[i].klen)) { + pr_err("aes_expandkey() failed on vector %d\n", i); + return -ENODEV; + } + + aescfb_encrypt(&ctx, buf, aescfb_tv[i].ptext, aescfb_tv[i].len, + aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) { + pr_err("aescfb_encrypt() #1 failed on vector %d\n", i); + return -ENODEV; + } + + /* decrypt in place */ + aescfb_decrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ptext, aescfb_tv[i].len)) { + pr_err("aescfb_decrypt() failed on vector %d\n", i); + return -ENODEV; + } + + /* encrypt in place */ + aescfb_encrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) { + pr_err("aescfb_encrypt() #2 failed on vector %d\n", i); + + return -ENODEV; + } + + } + return 0; +} +module_init(libaescfb_init); + +static void __exit libaescfb_exit(void) +{ +} +module_exit(libaescfb_exit); +#endif diff --git a/lib/devres.c b/lib/devres.c index c44f104b58d5..fe0c63caeb68 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/device.h> #include <linux/err.h> -#include <linux/pci.h> #include <linux/io.h> #include <linux/gfp.h> #include <linux/export.h> @@ -311,212 +311,6 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) EXPORT_SYMBOL(devm_ioport_unmap); #endif /* CONFIG_HAS_IOPORT_MAP */ -#ifdef CONFIG_PCI -/* - * PCI iomap devres - */ -#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS - -struct pcim_iomap_devres { - void __iomem *table[PCIM_IOMAP_MAX]; -}; - -static void pcim_iomap_release(struct device *gendev, void 
*res) -{ - struct pci_dev *dev = to_pci_dev(gendev); - struct pcim_iomap_devres *this = res; - int i; - - for (i = 0; i < PCIM_IOMAP_MAX; i++) - if (this->table[i]) - pci_iounmap(dev, this->table[i]); -} - -/** - * pcim_iomap_table - access iomap allocation table - * @pdev: PCI device to access iomap table for - * - * Access iomap allocation table for @dev. If iomap table doesn't - * exist and @pdev is managed, it will be allocated. All iomaps - * recorded in the iomap table are automatically unmapped on driver - * detach. - * - * This function might sleep when the table is first allocated but can - * be safely called without context and guaranteed to succeed once - * allocated. - */ -void __iomem * const *pcim_iomap_table(struct pci_dev *pdev) -{ - struct pcim_iomap_devres *dr, *new_dr; - - dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL); - if (dr) - return dr->table; - - new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL, - dev_to_node(&pdev->dev)); - if (!new_dr) - return NULL; - dr = devres_get(&pdev->dev, new_dr, NULL, NULL); - return dr->table; -} -EXPORT_SYMBOL(pcim_iomap_table); - -/** - * pcim_iomap - Managed pcim_iomap() - * @pdev: PCI device to iomap for - * @bar: BAR to iomap - * @maxlen: Maximum length of iomap - * - * Managed pci_iomap(). Map is automatically unmapped on driver - * detach. - */ -void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) -{ - void __iomem **tbl; - - BUG_ON(bar >= PCIM_IOMAP_MAX); - - tbl = (void __iomem **)pcim_iomap_table(pdev); - if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ - return NULL; - - tbl[bar] = pci_iomap(pdev, bar, maxlen); - return tbl[bar]; -} -EXPORT_SYMBOL(pcim_iomap); - -/** - * pcim_iounmap - Managed pci_iounmap() - * @pdev: PCI device to iounmap for - * @addr: Address to unmap - * - * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap(). - */ -void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr) -{ - void __iomem **tbl; - int i; - - pci_iounmap(pdev, addr); - - tbl = (void __iomem **)pcim_iomap_table(pdev); - BUG_ON(!tbl); - - for (i = 0; i < PCIM_IOMAP_MAX; i++) - if (tbl[i] == addr) { - tbl[i] = NULL; - return; - } - WARN_ON(1); -} -EXPORT_SYMBOL(pcim_iounmap); - -/** - * pcim_iomap_regions - Request and iomap PCI BARs - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to request and iomap - * @name: Name used when requesting regions - * - * Request and iomap regions specified by @mask. 
- */ -int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name) -{ - void __iomem * const *iomap; - int i, rc; - - iomap = pcim_iomap_table(pdev); - if (!iomap) - return -ENOMEM; - - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - unsigned long len; - - if (!(mask & (1 << i))) - continue; - - rc = -EINVAL; - len = pci_resource_len(pdev, i); - if (!len) - goto err_inval; - - rc = pci_request_region(pdev, i, name); - if (rc) - goto err_inval; - - rc = -ENOMEM; - if (!pcim_iomap(pdev, i, 0)) - goto err_region; - } - - return 0; - - err_region: - pci_release_region(pdev, i); - err_inval: - while (--i >= 0) { - if (!(mask & (1 << i))) - continue; - pcim_iounmap(pdev, iomap[i]); - pci_release_region(pdev, i); - } - - return rc; -} -EXPORT_SYMBOL(pcim_iomap_regions); - -/** - * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to iomap - * @name: Name used when requesting regions - * - * Request all PCI BARs and iomap regions specified by @mask. - */ -int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, - const char *name) -{ - int request_mask = ((1 << 6) - 1) & ~mask; - int rc; - - rc = pci_request_selected_regions(pdev, request_mask, name); - if (rc) - return rc; - - rc = pcim_iomap_regions(pdev, mask, name); - if (rc) - pci_release_selected_regions(pdev, request_mask); - return rc; -} -EXPORT_SYMBOL(pcim_iomap_regions_request_all); - -/** - * pcim_iounmap_regions - Unmap and release PCI BARs - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to unmap and release - * - * Unmap and release regions specified by @mask. - */ -void pcim_iounmap_regions(struct pci_dev *pdev, int mask) -{ - void __iomem * const *iomap; - int i; - - iomap = pcim_iomap_table(pdev); - if (!iomap) - return; - - for (i = 0; i < PCIM_IOMAP_MAX; i++) { - if (!(mask & (1 << i))) - continue; - - pcim_iounmap(pdev, iomap[i]); - pci_release_region(pdev, i); - } -} -EXPORT_SYMBOL(pcim_iounmap_regions); -#endif /* CONFIG_PCI */ - static void devm_arch_phys_ac_add_release(struct device *dev, void *res) { arch_phys_wc_del(*((int *)res)); diff --git a/lib/dhry_1.c b/lib/dhry_1.c index 08edbbb19f57..ca6c87232c58 100644 --- a/lib/dhry_1.c +++ b/lib/dhry_1.c @@ -277,7 +277,7 @@ int dhry(int n) dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING"); dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING"); - User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time)); + User_Time = ktime_ms_delta(End_Time, Begin_Time); kfree(Ptr_Glob); kfree(Next_Ptr_Glob); diff --git a/lib/dhry_run.c b/lib/dhry_run.c index f15ac666e9d3..e6a279dabf84 100644 --- a/lib/dhry_run.c +++ b/lib/dhry_run.c @@ -10,7 +10,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/mutex.h> #include <linux/smp.h> #define DHRY_VAX 1757 diff --git a/lib/dim/Makefile b/lib/dim/Makefile index 1d6858a108cb..c4cc4026c451 100644 --- a/lib/dim/Makefile +++ b/lib/dim/Makefile @@ -2,6 +2,6 @@ # DIM Dynamic Interrupt Moderation library # -obj-$(CONFIG_DIMLIB) += dim.o +obj-$(CONFIG_DIMLIB) += dimlib.o -dim-y := dim.o net_dim.o rdma_dim.o +dimlib-objs := dim.o net_dim.o rdma_dim.o diff --git a/lib/dim/dim.c b/lib/dim/dim.c index e89aaf07bde5..83b65ac74d73 100644 --- a/lib/dim/dim.c +++ b/lib/dim/dim.c @@ -82,3 +82,6 @@ bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, return true; } EXPORT_SYMBOL(dim_calc_stats); + +MODULE_DESCRIPTION("Dynamic Interrupt 
Moderation (DIM) library"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 6fba6423cc10..f2c5e7910bb1 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -302,7 +302,11 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) } else { for (end = buf; *end && !isspace(*end); end++) ; - BUG_ON(end == buf); + if (end == buf) { + pr_err("parse err after word:%d=%s\n", nwords, + nwords ? words[nwords - 1] : "<none>"); + return -EINVAL; + } } /* `buf' is start of word, `end' is one past its end */ @@ -640,10 +644,9 @@ static int param_set_dyndbg_classnames(const char *instr, const struct kernel_pa int cls_id, totct = 0; bool wanted; - cl_str = tmp = kstrdup(instr, GFP_KERNEL); - p = strchr(cl_str, '\n'); - if (p) - *p = '\0'; + cl_str = tmp = kstrdup_and_replace(instr, '\n', '\0', GFP_KERNEL); + if (!tmp) + return -ENOMEM; /* start with previously set state-bits, then modify */ curr_bits = old_bits = *dcp->bits; diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index a1389db1c30a..e49deddd3de9 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -15,12 +15,10 @@ #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) -static void dql_check_stall(struct dql *dql) +static void dql_check_stall(struct dql *dql, unsigned short stall_thrs) { - unsigned short stall_thrs; unsigned long now; - stall_thrs = READ_ONCE(dql->stall_thrs); if (!stall_thrs) return; @@ -86,9 +84,16 @@ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; unsigned int ovlimit, completed, num_queued; + unsigned short stall_thrs; bool all_prev_completed; num_queued = READ_ONCE(dql->num_queued); + /* Read stall_thrs in advance since it belongs to the same (first) + * cache line as ->num_queued. This way, dql_check_stall() does not + * need to touch the first cache line again later, reducing the window + * of possible false sharing. + */ + stall_thrs = READ_ONCE(dql->stall_thrs); /* Can't complete more than what's in queue */ BUG_ON(count > num_queued - dql->num_completed); @@ -178,7 +183,7 @@ void dql_completed(struct dql *dql, unsigned int count) dql->num_completed = completed; dql->prev_num_queued = num_queued; - dql_check_stall(dql); + dql_check_stall(dql, stall_thrs); } EXPORT_SYMBOL(dql_completed); diff --git a/lib/find_bit.c b/lib/find_bit.c index 32f99e9a670e..dacadd904250 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -116,6 +116,18 @@ unsigned long _find_first_and_bit(const unsigned long *addr1, EXPORT_SYMBOL(_find_first_and_bit); #endif +/* + * Find the first set bit in three memory regions. + */ +unsigned long _find_first_and_and_bit(const unsigned long *addr1, + const unsigned long *addr2, + const unsigned long *addr3, + unsigned long size) +{ + return FIND_FIRST_BIT(addr1[idx] & addr2[idx] & addr3[idx], /* nop */, size); +} +EXPORT_SYMBOL(_find_first_and_and_bit); + #ifndef find_first_zero_bit /* * Find the first cleared bit in a memory region. 
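
A brief aside on the _find_first_and_and_bit() helper added to lib/find_bit.c above: it returns the lowest bit index that is set in all three bitmaps at once, without materializing a temporary addr1 & addr2 & addr3 bitmap (the non-underscored find_first_and_and_bit() wrapper is the usual entry point, typically consumed by cpumask helpers that intersect three masks). Below is a minimal user-space sketch of the same word-at-a-time logic, assuming C11 and a GCC/Clang toolchain; the demo_ name and the direct use of __builtin_ctzl() are illustrative stand-ins, not the kernel implementation, which goes through the FIND_FIRST_BIT() macro.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define BITS_PER_ULONG (sizeof(unsigned long) * CHAR_BIT)

/* Lowest bit index set in all three bitmaps, or @size if there is none. */
static size_t demo_first_and_and_bit(const unsigned long *addr1,
				     const unsigned long *addr2,
				     const unsigned long *addr3,
				     size_t size)
{
	for (size_t idx = 0; idx * BITS_PER_ULONG < size; idx++) {
		unsigned long val = addr1[idx] & addr2[idx] & addr3[idx];

		if (val) {
			size_t bit = idx * BITS_PER_ULONG + __builtin_ctzl(val);

			return bit < size ? bit : size;	/* clamp, as FIND_FIRST_BIT() does */
		}
	}
	return size;
}

int main(void)
{
	unsigned long a = 0xF0, b = 0x3C, c = 0x1E;

	/* 0xF0 & 0x3C & 0x1E == 0x10, so the first common bit is 4. */
	printf("%zu\n", demo_first_and_and_bit(&a, &b, &c, BITS_PER_ULONG));
	return 0;
}

Computing the AND one word at a time is what makes the helper cheap: the caller never allocates or zeroes a scratch bitmap, and the scan stops at the first nonzero word.
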
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index 83332fefa6f4..84ecccddc771 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -84,83 +84,6 @@ bool fprop_new_period(struct fprop_global *p, int periods) } /* - * ---- SINGLE ---- - */ - -int fprop_local_init_single(struct fprop_local_single *pl) -{ - pl->events = 0; - pl->period = 0; - raw_spin_lock_init(&pl->lock); - return 0; -} - -void fprop_local_destroy_single(struct fprop_local_single *pl) -{ -} - -static void fprop_reflect_period_single(struct fprop_global *p, - struct fprop_local_single *pl) -{ - unsigned int period = p->period; - unsigned long flags; - - /* Fast path - period didn't change */ - if (pl->period == period) - return; - raw_spin_lock_irqsave(&pl->lock, flags); - /* Someone updated pl->period while we were spinning? */ - if (pl->period >= period) { - raw_spin_unlock_irqrestore(&pl->lock, flags); - return; - } - /* Aging zeroed our fraction? */ - if (period - pl->period < BITS_PER_LONG) - pl->events >>= period - pl->period; - else - pl->events = 0; - pl->period = period; - raw_spin_unlock_irqrestore(&pl->lock, flags); -} - -/* Event of type pl happened */ -void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) -{ - fprop_reflect_period_single(p, pl); - pl->events++; - percpu_counter_add(&p->events, 1); -} - -/* Return fraction of events of type pl */ -void fprop_fraction_single(struct fprop_global *p, - struct fprop_local_single *pl, - unsigned long *numerator, unsigned long *denominator) -{ - unsigned int seq; - s64 num, den; - - do { - seq = read_seqcount_begin(&p->sequence); - fprop_reflect_period_single(p, pl); - num = pl->events; - den = percpu_counter_read_positive(&p->events); - } while (read_seqcount_retry(&p->sequence, seq)); - - /* - * Make fraction <= 1 and denominator > 0 even in presence of percpu - * counter errors - */ - if (den <= num) { - if (num) - den = num; - else - den = 1; - } - *denominator = den; - *numerator = num; -} - -/* * ---- PERCPU ---- */ #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids))) diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig index 7ee468ef21ec..7e945fdcbf11 100644 --- a/lib/fonts/Kconfig +++ b/lib/fonts/Kconfig @@ -98,7 +98,8 @@ config FONT_10x18 config FONT_SUN8x16 bool "Sparc console 8x16 font" - depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT + depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || \ + BOOTX_TEXT || EARLYFB help This is the high resolution console font for Sun machines. Say Y. diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c index 973866438608..47e34950b665 100644 --- a/lib/fonts/fonts.c +++ b/lib/fonts/fonts.c @@ -96,18 +96,21 @@ EXPORT_SYMBOL(find_font); * get_default_font - get default font * @xres: screen size of X * @yres: screen size of Y - * @font_w: bit array of supported widths (1 - 32) - * @font_h: bit array of supported heights (1 - 32) + * @font_w: bit array of supported widths (1 - FB_MAX_BLIT_WIDTH) + * @font_h: bit array of supported heights (1 - FB_MAX_BLIT_HEIGHT) * * Get the default font for a specified screen size. * Dimensions are in pixels. * + * font_w or font_h being NULL means all values are supported. + * * Returns %NULL if no font is found, or a pointer to the * chosen font. 
* */ -const struct font_desc *get_default_font(int xres, int yres, u32 font_w, - u32 font_h) +const struct font_desc *get_default_font(int xres, int yres, + unsigned long *font_w, + unsigned long *font_h) { int i, c, cc, res; const struct font_desc *f, *g; @@ -135,8 +138,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w, if (res > 20) c += 20 - res; - if ((font_w & (1U << (f->width - 1))) && - (font_h & (1U << (f->height - 1)))) + if ((!font_w || test_bit(f->width - 1, font_w)) && + (!font_h || test_bit(f->height - 1, font_h))) c += 1000; if (c > cc) { diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index 493ec02dd5b3..d2377e00caab 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(), - * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c). + * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy() + * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c). * * For corner cases with UBSAN, try testing with: * @@ -15,14 +15,31 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/* We don't need to fill dmesg with the fortify WARNs during testing. */ +#ifdef DEBUG +# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x) +# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x) +#else +# define FORTIFY_REPORT_KUNIT(x...) do { } while (0) +# define FORTIFY_WARN_KUNIT(x...) do { } while (0) +#endif + /* Redefine fortify_panic() to track failures. */ void fortify_add_kunit_error(int write); #define fortify_panic(func, write, avail, size, retfail) do { \ - __fortify_report(FORTIFY_REASON(func, write), avail, size); \ + FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \ fortify_add_kunit_error(write); \ return (retfail); \ } while (0) +/* Redefine fortify_warn_once() to track memcpy() failures. */ +#define fortify_warn_once(chk_func, x...) 
do { \ + bool __result = chk_func; \ + FORTIFY_WARN_KUNIT(__result, x); \ + if (__result) \ + fortify_add_kunit_error(1); \ +} while (0) + #include <kunit/device.h> #include <kunit/test.h> #include <kunit/test-bug.h> @@ -64,7 +81,7 @@ void fortify_add_kunit_error(int write) kunit_put_resource(resource); } -static void known_sizes_test(struct kunit *test) +static void fortify_test_known_sizes(struct kunit *test) { KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8); KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10); @@ -97,7 +114,7 @@ static noinline size_t want_minus_one(int pick) return __compiletime_strlen(str); } -static void control_flow_split_test(struct kunit *test) +static void fortify_test_control_flow_split(struct kunit *test) { KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX); } @@ -173,11 +190,11 @@ static volatile size_t unknown_size = 50; #endif #define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \ -static void alloc_size_##allocator##_const_test(struct kunit *test) \ +static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \ { \ CONST_TEST_BODY(TEST_##allocator); \ } \ -static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \ +static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \ { \ DYNAMIC_TEST_BODY(TEST_##allocator); \ } @@ -267,28 +284,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc) \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \ - vfree(p)); \ + kvfree(p)); \ checker((expected_pages) * PAGE_SIZE, \ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \ - vfree(p)); \ + kvfree(p)); \ \ prev_size = (expected_pages) * PAGE_SIZE; \ orig = kvmalloc(prev_size, gfp); \ @@ -346,6 +363,31 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc) } while (0) DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc) +static const char * const test_strs[] = { + "", + "Hello there", + "A longer string, just for variety", +}; + +#define TEST_realloc(checker) do { \ + gfp_t gfp = GFP_KERNEL; \ + size_t len; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \ + len = strlen(test_strs[i]); \ + KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \ + checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \ + kfree(p)); \ + checker(len, kmemdup(test_strs[i], len, gfp), \ + kfree(p)); \ + } \ +} while (0) +static void fortify_test_realloc_size(struct kunit *test) +{ + TEST_realloc(check_dynamic); +} + /* * We can't have an array at the end of a structure or else * builds without -fstrict-flex-arrays=3 will report them as @@ -361,7 +403,7 @@ struct fortify_padding { /* Force compiler into not being able to resolve size at compile-time. 
*/ static volatile int unconst; -static void strlen_test(struct kunit *test) +static void fortify_test_strlen(struct kunit *test) { struct fortify_padding pad = { }; int i, end = sizeof(pad.buf) - 1; @@ -384,7 +426,7 @@ static void strlen_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); } -static void strnlen_test(struct kunit *test) +static void fortify_test_strnlen(struct kunit *test) { struct fortify_padding pad = { }; int i, end = sizeof(pad.buf) - 1; @@ -422,7 +464,7 @@ static void strnlen_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void strcpy_test(struct kunit *test) +static void fortify_test_strcpy(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf) + 1] = { }; @@ -480,7 +522,7 @@ static void strcpy_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strncpy_test(struct kunit *test) +static void fortify_test_strncpy(struct kunit *test) { struct fortify_padding pad = { }; char src[] = "Copy me fully into a small buffer and I will overflow!"; @@ -539,7 +581,7 @@ static void strncpy_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strscpy_test(struct kunit *test) +static void fortify_test_strscpy(struct kunit *test) { struct fortify_padding pad = { }; char src[] = "Copy me fully into a small buffer and I will overflow!"; @@ -596,7 +638,7 @@ static void strscpy_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strcat_test(struct kunit *test) +static void fortify_test_strcat(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf) / 2] = { }; @@ -653,7 +695,7 @@ static void strcat_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strncat_test(struct kunit *test) +static void fortify_test_strncat(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf)] = { }; @@ -726,7 +768,7 @@ static void strncat_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strlcat_test(struct kunit *test) +static void fortify_test_strlcat(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf)] = { }; @@ -811,7 +853,75 @@ static void strlcat_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void memscan_test(struct kunit *test) +/* Check for 0-sized arrays... 
*/ +struct fortify_zero_sized { + unsigned long bytes_before; + char buf[0]; + unsigned long bytes_after; +}; + +#define __fortify_test(memfunc) \ +static void fortify_test_##memfunc(struct kunit *test) \ +{ \ + struct fortify_zero_sized zero = { }; \ + struct fortify_padding pad = { }; \ + char srcA[sizeof(pad.buf) + 2]; \ + char srcB[sizeof(pad.buf) + 2]; \ + size_t len = sizeof(pad.buf) + unconst; \ + \ + memset(srcA, 'A', sizeof(srcA)); \ + KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \ + memset(srcB, 'B', sizeof(srcB)); \ + KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \ + \ + memfunc(pad.buf, srcA, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf + 1, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len - 1); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len + 1); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \ + memfunc(pad.buf + 1, srcB, len); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \ + \ + /* Reset error counter. */ \ + fortify_write_overflows = 0; \ + /* Copy nothing into nothing: no errors. */ \ + memfunc(zero.buf, srcB, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + /* We currently explicitly ignore zero-sized dests. 
*/ \ + memfunc(zero.buf, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ +} +__fortify_test(memcpy) +__fortify_test(memmove) + +static void fortify_test_memscan(struct kunit *test) { char haystack[] = "Where oh where is my memory range?"; char *mem = haystack + strlen("Where oh where is "); @@ -830,7 +940,7 @@ static void memscan_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void memchr_test(struct kunit *test) +static void fortify_test_memchr(struct kunit *test) { char haystack[] = "Where oh where is my memory range?"; char *mem = haystack + strlen("Where oh where is "); @@ -849,7 +959,7 @@ static void memchr_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void memchr_inv_test(struct kunit *test) +static void fortify_test_memchr_inv(struct kunit *test) { char haystack[] = "Where oh where is my memory range?"; char *mem = haystack + 1; @@ -869,7 +979,7 @@ static void memchr_inv_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void memcmp_test(struct kunit *test) +static void fortify_test_memcmp(struct kunit *test) { char one[] = "My mind is going ..."; char two[] = "My mind is going ... I can feel it."; @@ -891,7 +1001,7 @@ static void memcmp_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void kmemdup_test(struct kunit *test) +static void fortify_test_kmemdup(struct kunit *test) { char src[] = "I got Doom running on it!"; char *copy; @@ -917,19 +1027,19 @@ static void kmemdup_test(struct kunit *test) /* Out of bounds by 1 byte. */ copy = kmemdup(src, len + 1, GFP_KERNEL); - KUNIT_EXPECT_NULL(test, copy); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); kfree(copy); /* Way out of bounds. */ copy = kmemdup(src, len * 2, GFP_KERNEL); - KUNIT_EXPECT_NULL(test, copy); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); kfree(copy); /* Starting offset causing out of bounds. 
*/ copy = kmemdup(src + 1, len, GFP_KERNEL); - KUNIT_EXPECT_NULL(test, copy); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3); kfree(copy); } @@ -951,31 +1061,33 @@ static int fortify_test_init(struct kunit *test) } static struct kunit_case fortify_test_cases[] = { - KUNIT_CASE(known_sizes_test), - KUNIT_CASE(control_flow_split_test), - KUNIT_CASE(alloc_size_kmalloc_const_test), - KUNIT_CASE(alloc_size_kmalloc_dynamic_test), - KUNIT_CASE(alloc_size_vmalloc_const_test), - KUNIT_CASE(alloc_size_vmalloc_dynamic_test), - KUNIT_CASE(alloc_size_kvmalloc_const_test), - KUNIT_CASE(alloc_size_kvmalloc_dynamic_test), - KUNIT_CASE(alloc_size_devm_kmalloc_const_test), - KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test), - KUNIT_CASE(strlen_test), - KUNIT_CASE(strnlen_test), - KUNIT_CASE(strcpy_test), - KUNIT_CASE(strncpy_test), - KUNIT_CASE(strscpy_test), - KUNIT_CASE(strcat_test), - KUNIT_CASE(strncat_test), - KUNIT_CASE(strlcat_test), + KUNIT_CASE(fortify_test_known_sizes), + KUNIT_CASE(fortify_test_control_flow_split), + KUNIT_CASE(fortify_test_alloc_size_kmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_vmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic), + KUNIT_CASE(fortify_test_realloc_size), + KUNIT_CASE(fortify_test_strlen), + KUNIT_CASE(fortify_test_strnlen), + KUNIT_CASE(fortify_test_strcpy), + KUNIT_CASE(fortify_test_strncpy), + KUNIT_CASE(fortify_test_strscpy), + KUNIT_CASE(fortify_test_strcat), + KUNIT_CASE(fortify_test_strncat), + KUNIT_CASE(fortify_test_strlcat), /* skip memset: performs bounds checking on whole structs */ - /* skip memcpy: still using warn-and-overwrite instead of hard-fail */ - KUNIT_CASE(memscan_test), - KUNIT_CASE(memchr_test), - KUNIT_CASE(memchr_inv_test), - KUNIT_CASE(memcmp_test), - KUNIT_CASE(kmemdup_test), + KUNIT_CASE(fortify_test_memcpy), + KUNIT_CASE(fortify_test_memmove), + KUNIT_CASE(fortify_test_memscan), + KUNIT_CASE(fortify_test_memchr), + KUNIT_CASE(fortify_test_memchr_inv), + KUNIT_CASE(fortify_test_memcmp), + KUNIT_CASE(fortify_test_kmemdup), {} }; diff --git a/lib/fw_table.c b/lib/fw_table.c index c3569d2ba503..16291814450e 100644 --- a/lib/fw_table.c +++ b/lib/fw_table.c @@ -127,6 +127,7 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc, * * @id: table id (for debugging purposes) * @table_size: size of the root table + * @max_length: maximum size of the table (ignore if 0) * @table_header: where does the table start? 
* @proc: array of acpi_subtable_proc struct containing entry id * and associated handler with it @@ -148,18 +149,21 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc, int __init_or_fwtbl_lib acpi_parse_entries_array(char *id, unsigned long table_size, union fw_table_header *table_header, + unsigned long max_length, struct acpi_subtable_proc *proc, int proc_num, unsigned int max_entries) { - unsigned long table_end, subtable_len, entry_len; + unsigned long table_len, table_end, subtable_len, entry_len; struct acpi_subtable_entry entry; enum acpi_subtable_type type; int count = 0; int i; type = acpi_get_subtable_type(id); - table_end = (unsigned long)table_header + - acpi_table_get_length(type, table_header); + table_len = acpi_table_get_length(type, table_header); + if (max_length && max_length < table_len) + table_len = max_length; + table_end = (unsigned long)table_header + table_len; /* Parse all entries looking for a match. */ @@ -208,7 +212,8 @@ int __init_or_fwtbl_lib cdat_table_parse(enum acpi_cdat_type type, acpi_tbl_entry_handler_arg handler_arg, void *arg, - struct acpi_table_cdat *table_header) + struct acpi_table_cdat *table_header, + unsigned long length) { struct acpi_subtable_proc proc = { .id = type, @@ -222,6 +227,6 @@ cdat_table_parse(enum acpi_cdat_type type, return acpi_parse_entries_array(ACPI_SIG_CDAT, sizeof(struct acpi_table_cdat), (union fw_table_header *)table_header, - &proc, 1, 0); + length, &proc, 1, 0); } EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse); diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c index 41f1bcdc4488..aaefb9b678c8 100644 --- a/lib/generic-radix-tree.c +++ b/lib/generic-radix-tree.c @@ -5,7 +5,7 @@ #include <linux/gfp.h> #include <linux/kmemleak.h> -#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *)) +#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *)) #define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY) struct genradix_node { @@ -14,13 +14,13 @@ struct genradix_node { struct genradix_node *children[GENRADIX_ARY]; /* Leaf: */ - u8 data[PAGE_SIZE]; + u8 data[GENRADIX_NODE_SIZE]; }; }; static inline int genradix_depth_shift(unsigned depth) { - return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth; + return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth; } /* @@ -33,7 +33,7 @@ static inline size_t genradix_depth_size(unsigned depth) /* depth that's needed for a genradix that can address up to ULONG_MAX: */ #define GENRADIX_MAX_DEPTH \ - DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT) + DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT) #define GENRADIX_DEPTH_MASK \ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1)) @@ -79,23 +79,12 @@ EXPORT_SYMBOL(__genradix_ptr); static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) { - struct genradix_node *node; - - node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO); - - /* - * We're using pages (not slab allocations) directly for kernel data - * structures, so we need to explicitly inform kmemleak of them in order - * to avoid false positive memory leak reports. 
- */ - kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask); - return node; + return kzalloc(GENRADIX_NODE_SIZE, gfp_mask); } static inline void genradix_free_node(struct genradix_node *node) { - kmemleak_free(node); - free_page((unsigned long)node); + kfree(node); } /* @@ -200,7 +189,7 @@ restart: i++; iter->offset = round_down(iter->offset + objs_per_ptr, objs_per_ptr); - iter->pos = (iter->offset >> PAGE_SHIFT) * + iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page; if (i == GENRADIX_ARY) goto restart; @@ -209,7 +198,7 @@ restart: n = n->children[i]; } - return &n->data[iter->offset & (PAGE_SIZE - 1)]; + return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)]; } EXPORT_SYMBOL(__genradix_iter_peek); @@ -235,7 +224,7 @@ restart: if (ilog2(iter->offset) >= genradix_depth_shift(level)) { iter->offset = genradix_depth_size(level); - iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; + iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page; iter->offset -= obj_size_plus_page_remainder; iter->pos--; @@ -251,7 +240,7 @@ restart: size_t objs_per_ptr = genradix_depth_size(level); iter->offset = round_down(iter->offset, objs_per_ptr); - iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; + iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page; if (!iter->offset) return NULL; @@ -267,7 +256,7 @@ restart: n = n->children[i]; } - return &n->data[iter->offset & (PAGE_SIZE - 1)]; + return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)]; } EXPORT_SYMBOL(__genradix_iter_peek_prev); @@ -289,7 +278,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size, { size_t offset; - for (offset = 0; offset < size; offset += PAGE_SIZE) + for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE) if (!__genradix_ptr_alloc(radix, offset, gfp_mask)) return -ENOMEM; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index fb9a2f06dd1e..03b427e2707e 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -30,7 +30,7 @@ #include <net/net_namespace.h> -u64 uevent_seqnum; +atomic64_t uevent_seqnum; #ifdef CONFIG_UEVENT_HELPER char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; #endif @@ -42,10 +42,9 @@ struct uevent_sock { #ifdef CONFIG_NET static LIST_HEAD(uevent_sock_list); -#endif - -/* This lock protects uevent_seqnum and uevent_sock_list */ +/* This lock protects uevent_sock_list */ static DEFINE_MUTEX(uevent_sock_mutex); +#endif /* the strings here must match the enum in include/linux/kobject.h */ static const char *kobject_actions[] = { @@ -315,6 +314,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env, int retval = 0; /* send netlink message */ + mutex_lock(&uevent_sock_mutex); list_for_each_entry(ue_sk, &uevent_sock_list, list) { struct sock *uevent_sock = ue_sk->sk; @@ -334,6 +334,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env, if (retval == -ENOBUFS || retval == -ESRCH) retval = 0; } + mutex_unlock(&uevent_sock_mutex); consume_skb(skb); return retval; @@ -583,16 +584,14 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, break; } - mutex_lock(&uevent_sock_mutex); /* we will send an event, so request a new sequence number */ - retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum); - if (retval) { - mutex_unlock(&uevent_sock_mutex); + retval = add_uevent_var(env, "SEQNUM=%llu", + atomic64_inc_return(&uevent_seqnum)); + if (retval) goto exit; - } + retval = kobject_uevent_net_broadcast(kobj, env, action_string, devpath); - 
mutex_unlock(&uevent_sock_mutex); #ifdef CONFIG_UEVENT_HELPER /* call uevent_helper, usually only enabled during early boot */ @@ -688,7 +687,8 @@ static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb, int ret; /* bump and prepare sequence number */ - ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum); + ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", + atomic64_inc_return(&uevent_seqnum)); if (ret < 0 || (size_t)ret >= sizeof(buf)) return -ENOMEM; ret++; @@ -742,9 +742,7 @@ static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh, return -EPERM; } - mutex_lock(&uevent_sock_mutex); ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack); - mutex_unlock(&uevent_sock_mutex); return ret; } diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig index 68a6daec0aef..34d7242d526d 100644 --- a/lib/kunit/Kconfig +++ b/lib/kunit/Kconfig @@ -24,6 +24,17 @@ config KUNIT_DEBUGFS test suite, which allow users to see results of the last test suite run that occurred. +config KUNIT_FAULT_TEST + bool "Enable KUnit tests which print BUG stacktraces" + depends on KUNIT_TEST + depends on !UML + default y + help + Enables fault handling tests for the KUnit framework. These tests may + trigger a kernel BUG(), and the associated stack trace, even when they + pass. If this conflicts with your test infrastructure (or is confusing + or annoying), they can be disabled by setting this to N. + config KUNIT_TEST tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS default KUNIT_ALL_TESTS diff --git a/lib/kunit/device.c b/lib/kunit/device.c index abc603730b8e..25c81ed465fb 100644 --- a/lib/kunit/device.c +++ b/lib/kunit/device.c @@ -51,7 +51,7 @@ int kunit_bus_init(void) error = bus_register(&kunit_bus_type); if (error) - bus_unregister(&kunit_bus_type); + root_device_unregister(kunit_bus_device); return error; } diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index f7980ef236a3..e3412e0ca399 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -109,6 +109,48 @@ static struct kunit_suite kunit_try_catch_test_suite = { .test_cases = kunit_try_catch_test_cases, }; +#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST) + +static void kunit_test_null_dereference(void *data) +{ + struct kunit *test = data; + int *null = NULL; + + *null = 0; + + KUNIT_FAIL(test, "This line should never be reached\n"); +} + +static void kunit_test_fault_null_dereference(struct kunit *test) +{ + struct kunit_try_catch_test_context *ctx = test->priv; + struct kunit_try_catch *try_catch = ctx->try_catch; + + kunit_try_catch_init(try_catch, + test, + kunit_test_null_dereference, + kunit_test_catch); + kunit_try_catch_run(try_catch, test); + + KUNIT_EXPECT_EQ(test, try_catch->try_result, -EINTR); + KUNIT_EXPECT_TRUE(test, ctx->function_called); +} + +#endif /* CONFIG_KUNIT_FAULT_TEST */ + +static struct kunit_case kunit_fault_test_cases[] = { +#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST) + KUNIT_CASE(kunit_test_fault_null_dereference), +#endif /* CONFIG_KUNIT_FAULT_TEST */ + {} +}; + +static struct kunit_suite kunit_fault_test_suite = { + .name = "kunit_fault", + .init = kunit_try_catch_test_init, + .test_cases = kunit_fault_test_cases, +}; + /* * Context for testing test managed resources * is_resource_initialized is used to test arbitrary resources @@ -826,6 +868,7 @@ static struct kunit_suite kunit_current_test_suite = { kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite, &kunit_log_test_suite, &kunit_status_test_suite, - &kunit_current_test_suite, 
&kunit_device_test_suite); + &kunit_current_test_suite, &kunit_device_test_suite, + &kunit_fault_test_suite); MODULE_LICENSE("GPL v2"); diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c index 03fb511826f7..7511442ea98f 100644 --- a/lib/kunit/string-stream-test.c +++ b/lib/kunit/string-stream-test.c @@ -22,18 +22,10 @@ struct string_stream_test_priv { }; /* Avoids a cast warning if kfree() is passed direct to kunit_add_action(). */ -static void kfree_wrapper(void *p) -{ - kfree(p); -} +KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *); /* Avoids a cast warning if string_stream_destroy() is passed direct to kunit_add_action(). */ -static void cleanup_raw_stream(void *p) -{ - struct string_stream *stream = p; - - string_stream_destroy(stream); -} +KUNIT_DEFINE_ACTION_WRAPPER(cleanup_raw_stream, string_stream_destroy, struct string_stream *); static char *get_concatenated_string(struct kunit *test, struct string_stream *stream) { diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 1d1475578515..b8514dbb337c 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -712,6 +712,9 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_ { unsigned int i; + if (num_suites == 0) + return 0; + if (!kunit_enabled() && num_suites > 0) { pr_info("kunit: disabled\n"); return 0; diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index f7825991d576..6bbe0025b079 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -11,13 +11,14 @@ #include <linux/completion.h> #include <linux/kernel.h> #include <linux/kthread.h> +#include <linux/sched/task.h> #include "try-catch-impl.h" void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch) { try_catch->try_result = -EFAULT; - kthread_complete_and_exit(try_catch->try_completion, -EFAULT); + kthread_exit(0); } EXPORT_SYMBOL_GPL(kunit_try_catch_throw); @@ -25,9 +26,12 @@ static int kunit_generic_run_threadfn_adapter(void *data) { struct kunit_try_catch *try_catch = data; + try_catch->try_result = -EINTR; try_catch->try(try_catch->context); + if (try_catch->try_result == -EINTR) + try_catch->try_result = 0; - kthread_complete_and_exit(try_catch->try_completion, 0); + return 0; } static unsigned long kunit_test_timeout(void) @@ -57,30 +61,38 @@ static unsigned long kunit_test_timeout(void) void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) { - DECLARE_COMPLETION_ONSTACK(try_completion); struct kunit *test = try_catch->test; struct task_struct *task_struct; + struct completion *task_done; int exit_code, time_remaining; try_catch->context = context; - try_catch->try_completion = &try_completion; try_catch->try_result = 0; - task_struct = kthread_run(kunit_generic_run_threadfn_adapter, - try_catch, - "kunit_try_catch_thread"); + task_struct = kthread_create(kunit_generic_run_threadfn_adapter, + try_catch, "kunit_try_catch_thread"); if (IS_ERR(task_struct)) { + try_catch->try_result = PTR_ERR(task_struct); try_catch->catch(try_catch->context); return; } + get_task_struct(task_struct); + /* + * As for a vfork(2), task_struct->vfork_done (pointing to the + * underlying kthread->exited) can be used to wait for the end of a + * kernel thread. It is set to NULL when the thread exits, so we + * keep a copy here. 
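+ *
+ * In short, the resulting wait pattern (a condensed sketch of the
+ * code below) is:
+ *
+ *	task = kthread_create(threadfn, data, "kunit_try_catch_thread");
+ *	get_task_struct(task);
+ *	task_done = task->vfork_done;	/* the kthread's "exited" completion */
+ *	wake_up_process(task);
+ *	wait_for_completion_timeout(task_done, timeout);
+ *	put_task_struct(task);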
+ */
+ task_done = task_struct->vfork_done;
+ wake_up_process(task_struct);
- time_remaining = wait_for_completion_timeout(&try_completion,
+ time_remaining = wait_for_completion_timeout(task_done,
 kunit_test_timeout());
 if (time_remaining == 0) {
- kunit_err(test, "try timed out\n");
 try_catch->try_result = -ETIMEDOUT;
 kthread_stop(task_struct);
 }
+ put_task_struct(task_struct);
 exit_code = try_catch->try_result;
 if (!exit_code)
@@ -88,8 +100,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
 if (exit_code == -EFAULT)
 try_catch->try_result = 0;
- else if (exit_code == -EINTR)
- kunit_err(test, "wake_up_process() was never called\n");
+ else if (exit_code == -EINTR) {
+ if (test->last_seen.file)
+ kunit_err(test, "try faulted: last line seen %s:%d\n",
+ test->last_seen.file, test->last_seen.line);
+ else
+ kunit_err(test, "try faulted\n");
+ } else if (exit_code == -ETIMEDOUT)
+ kunit_err(test, "try timed out\n");
 else if (exit_code)
 kunit_err(test, "Unknown error: %d\n", exit_code);
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 859b67c4d697..27e0c8ee71d8 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -139,7 +139,7 @@ static void __init iov_kunit_copy_to_kvec(struct kunit *test)
 return;
 }
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 /*
@@ -194,7 +194,7 @@ stop:
 return;
 }
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 struct bvec_test_range {
@@ -302,7 +302,7 @@ static void __init iov_kunit_copy_to_bvec(struct kunit *test)
 return;
 }
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 /*
@@ -359,7 +359,7 @@ stop:
 return;
 }
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 static void iov_kunit_destroy_xarray(void *data)
@@ -453,7 +453,7 @@ static void __init iov_kunit_copy_to_xarray(struct kunit *test)
 return;
 }
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 /*
@@ -516,7 +516,7 @@ stop:
 return;
 }
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 /*
@@ -596,7 +596,7 @@ static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
 stop:
 KUNIT_EXPECT_EQ(test, size, 0);
 KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 /*
@@ -674,7 +674,7 @@ static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
 stop:
 KUNIT_EXPECT_EQ(test, size, 0);
 KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 /*
@@ -753,7 +753,7 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
 }
 stop:
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
 }
 static struct kunit_case __refdata iov_kunit_cases[] = {
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index af0970288727..2d7d27e6ae3c 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1307,8 +1307,8 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
 }
 /*
- * mas_node_count() - Check if enough nodes are allocated and request more if
- * there is not enough nodes.
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there are not enough nodes.
* @mas: The maple state * @count: The number of nodes needed * @gfp: the gfp flags @@ -2271,8 +2271,6 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast) struct ma_state l_tmp = *mast->orig_l; unsigned char depth = 0; - r_tmp = *mast->orig_r; - l_tmp = *mast->orig_l; do { mas_ascend(mast->orig_r); mas_ascend(mast->orig_l); @@ -5111,18 +5109,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, if (size == 0 || max - min < size - 1) return -EINVAL; - if (mas_is_start(mas)) { + if (mas_is_start(mas)) mas_start(mas); - mas->offset = mas_data_end(mas); - } else if (mas->offset >= 2) { - mas->offset -= 2; - } else if (!mas_rewind_node(mas)) { + else if ((mas->offset < 2) && (!mas_rewind_node(mas))) return -EBUSY; - } - /* Empty set. */ - if (mas_is_none(mas) || mas_is_ptr(mas)) + if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) return mas_sparse_area(mas, min, max, size, false); + else if (mas->offset >= 2) + mas->offset -= 2; + else + mas->offset = mas_data_end(mas); + /* The start of the window can only be within these values. */ mas->index = min; diff --git a/lib/math/div64.c b/lib/math/div64.c index 55a81782e271..191761b1b623 100644 --- a/lib/math/div64.c +++ b/lib/math/div64.c @@ -22,6 +22,7 @@ #include <linux/export.h> #include <linux/math.h> #include <linux/math64.h> +#include <linux/minmax.h> #include <linux/log2.h> /* Not needed on 64bit architectures */ @@ -191,6 +192,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) /* can a * b overflow ? */ if (ilog2(a) + ilog2(b) > 62) { /* + * Note that the algorithm after the if block below might lose + * some precision and the result is more exact for b > a. So + * exchange a and b if a is bigger than b. + * + * For example with a = 43980465100800, b = 100000000, c = 1000000000 + * the below calculation doesn't modify b at all because div == 0 + * and then shift becomes 45 + 26 - 62 = 9 and so the result + * becomes 4398035251080. However with a and b swapped the exact + * result is calculated (i.e. 4398046510080). + */ + if (a > b) + swap(a, b); + + /* * (b * a) / c is equal to * * (b / c) * a + diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c index d42cebf7407f..d3b64b10da1c 100644 --- a/lib/math/prime_numbers.c +++ b/lib/math/prime_numbers.c @@ -6,8 +6,6 @@ #include <linux/prime_numbers.h> #include <linux/slab.h> -#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long)) - struct primes { struct rcu_head rcu; unsigned long last, sz; diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c index 30e00ef0bf2e..20ea9038c3ff 100644 --- a/lib/memcpy_kunit.c +++ b/lib/memcpy_kunit.c @@ -309,9 +309,6 @@ static void set_random_nonzero(struct kunit *test, u8 *byte) static void init_large(struct kunit *test) { - if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) - kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y"); - /* Get many bit patterns. 
*/ get_random_bytes(large_src, ARRAY_SIZE(large_src)); @@ -496,58 +493,6 @@ static void memmove_overlap_test(struct kunit *test) } } -static void strtomem_test(struct kunit *test) -{ - static const char input[sizeof(unsigned long)] = "hi"; - static const char truncate[] = "this is too long"; - struct { - unsigned long canary1; - unsigned char output[sizeof(unsigned long)] __nonstring; - unsigned long canary2; - } wrap; - - memset(&wrap, 0xFF, sizeof(wrap)); - KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, - "bad initial canary value"); - KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, - "bad initial canary value"); - - /* Check unpadded copy leaves surroundings untouched. */ - strtomem(wrap.output, input); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); - KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); - for (size_t i = 2; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check truncated copy leaves surroundings untouched. */ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem(wrap.output, truncate); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - for (size_t i = 0; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check padded copy leaves only string padded. */ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem_pad(wrap.output, input, 0xAA); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); - KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); - for (size_t i = 2; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check truncated padded copy has no padding. 
*/ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem(wrap.output, truncate); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - for (size_t i = 0; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); -} - static struct kunit_case memcpy_test_cases[] = { KUNIT_CASE(memset_test), KUNIT_CASE(memcpy_test), @@ -555,7 +500,6 @@ static struct kunit_case memcpy_test_cases[] = { KUNIT_CASE_SLOW(memmove_test), KUNIT_CASE_SLOW(memmove_large_test), KUNIT_CASE_SLOW(memmove_overlap_test), - KUNIT_CASE(strtomem_test), {} }; diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 65e8a72a83bf..4ef31b0bb74d 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -1172,6 +1172,24 @@ static void castable_to_type_test(struct kunit *test) #undef TEST_CASTABLE_TO_TYPE } +struct foo { + int a; + u32 counter; + s16 array[] __counted_by(counter); +}; + +static void DEFINE_FLEX_test(struct kunit *test) +{ + DEFINE_RAW_FLEX(struct foo, two, array, 2); + DEFINE_FLEX(struct foo, eight, array, counter, 8); + DEFINE_FLEX(struct foo, empty, array, counter, 0); + + KUNIT_EXPECT_EQ(test, __struct_size(two), + sizeof(struct foo) + sizeof(s16) + sizeof(s16)); + KUNIT_EXPECT_EQ(test, __struct_size(eight), 24); + KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo)); +} + static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(u8_u8__u8_overflow_test), KUNIT_CASE(s8_s8__s8_overflow_test), @@ -1194,6 +1212,7 @@ static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(overflows_type_test), KUNIT_CASE(same_type_test), KUNIT_CASE(castable_to_type_test), + KUNIT_CASE(DEFINE_FLEX_test), {} }; diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c deleted file mode 100644 index ce39ce9f3526..000000000000 --- a/lib/pci_iomap.c +++ /dev/null @@ -1,180 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Implement the default iomap interfaces - * - * (C) Copyright 2004 Linus Torvalds - */ -#include <linux/pci.h> -#include <linux/io.h> - -#include <linux/export.h> - -#ifdef CONFIG_PCI -/** - * pci_iomap_range - create a virtual mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @offset: map memory at the given offset in BAR - * @maxlen: max length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR from offset to the end, pass %0 here. - * */ -void __iomem *pci_iomap_range(struct pci_dev *dev, - int bar, - unsigned long offset, - unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - if (len <= offset || !start) - return NULL; - len -= offset; - start += offset; - if (maxlen && len > maxlen) - len = maxlen; - if (flags & IORESOURCE_IO) - return __pci_ioport_map(dev, start, len); - if (flags & IORESOURCE_MEM) - return ioremap(start, len); - /* What? 
*/ - return NULL; -} -EXPORT_SYMBOL(pci_iomap_range); - -/** - * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @offset: map memory at the given offset in BAR - * @maxlen: max length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. When possible write combining - * is used. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR from offset to the end, pass %0 here. - * */ -void __iomem *pci_iomap_wc_range(struct pci_dev *dev, - int bar, - unsigned long offset, - unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - - if (flags & IORESOURCE_IO) - return NULL; - - if (len <= offset || !start) - return NULL; - - len -= offset; - start += offset; - if (maxlen && len > maxlen) - len = maxlen; - - if (flags & IORESOURCE_MEM) - return ioremap_wc(start, len); - - /* What? */ - return NULL; -} -EXPORT_SYMBOL_GPL(pci_iomap_wc_range); - -/** - * pci_iomap - create a virtual mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @maxlen: length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR without checking for its length first, pass %0 here. - * */ -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) -{ - return pci_iomap_range(dev, bar, 0, maxlen); -} -EXPORT_SYMBOL(pci_iomap); - -/** - * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR - * @dev: PCI device that owns the BAR - * @bar: BAR number - * @maxlen: length of the memory to map - * - * Using this function you will get a __iomem address to your device BAR. - * You can access it using ioread*() and iowrite*(). These functions hide - * the details if this is a MMIO or PIO address space and will just do what - * you expect from them in the correct way. When possible write combining - * is used. - * - * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR without checking for its length first, pass %0 here. - * */ -void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) -{ - return pci_iomap_wc_range(dev, bar, 0, maxlen); -} -EXPORT_SYMBOL_GPL(pci_iomap_wc); - -/* - * pci_iounmap() somewhat illogically comes from lib/iomap.c for the - * CONFIG_GENERIC_IOMAP case, because that's the code that knows about - * the different IOMAP ranges. - * - * But if the architecture does not use the generic iomap code, and if - * it has _not_ defined it's own private pci_iounmap function, we define - * it here. - * - * NOTE! 
This default implementation assumes that if the architecture - * support ioport mapping (HAS_IOPORT_MAP), the ioport mapping will - * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [, - * and does not need unmapping with 'ioport_unmap()'. - * - * If you have different rules for your architecture, you need to - * implement your own pci_iounmap() that knows the rules for where - * and how IO vs MEM get mapped. - * - * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes - * from legacy <asm-generic/io.h> header file behavior. In particular, - * it would seem to make sense to do the iounmap(p) for the non-IO-space - * case here regardless, but that's not what the old header file code - * did. Probably incorrectly, but this is meant to be bug-for-bug - * compatible. - */ -#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP) - -void pci_iounmap(struct pci_dev *dev, void __iomem *p) -{ -#ifdef ARCH_HAS_GENERIC_IOPORT_MAP - uintptr_t start = (uintptr_t) PCI_IOBASE; - uintptr_t addr = (uintptr_t) p; - - if (addr >= start && addr < start + IO_SPACE_LIMIT) - return; - iounmap(p); -#endif -} -EXPORT_SYMBOL(pci_iounmap); - -#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */ - -#endif /* CONFIG_PCI */ diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 1c5420ff254e..385a94aa0b99 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -21,7 +21,7 @@ altivec_flags += -isystem $(shell $(CC) -print-file-name=include) ifdef CONFIG_CC_IS_CLANG # clang ppc port does not yet support -maltivec when -msoft-float is # enabled. A future release of clang will resolve this -# https://bugs.llvm.org/show_bug.cgi?id=31177 +# https://llvm.org/pr31177 CFLAGS_REMOVE_altivec1.o += -msoft-float CFLAGS_REMOVE_altivec2.o += -msoft-float CFLAGS_REMOVE_altivec4.o += -msoft-float diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 92c6b1fd8989..1e453f825c05 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -494,18 +494,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, struct sbitmap_word *map = &sb->map[index]; unsigned long get_mask; unsigned int map_depth = __map_depth(sb, index); + unsigned long val; sbitmap_deferred_clear(map); - if (map->word == (1UL << (map_depth - 1)) - 1) + val = READ_ONCE(map->word); + if (val == (1UL << (map_depth - 1)) - 1) goto next; - nr = find_first_zero_bit(&map->word, map_depth); + nr = find_first_zero_bit(&val, map_depth); if (nr + nr_tags <= map_depth) { atomic_long_t *ptr = (atomic_long_t *) &map->word; - unsigned long val; get_mask = ((1UL << nr_tags) - 1) << nr; - val = READ_ONCE(map->word); while (!atomic_long_try_cmpxchg(ptr, &val, get_mask | val)) ; diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 68b45c82c37a..7bc2220fea80 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter, do { res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max, extraction_flags, &off); - if (res < 0) + if (res <= 0) goto failed; len = res; diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c index d4a3730b08fa..4ce960438806 100644 --- a/lib/slub_kunit.c +++ b/lib/slub_kunit.c @@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test) ptr_addr = (unsigned long *)(p + s->offset); tmp = *ptr_addr; - p[s->offset] = 0x12; + p[s->offset] = ~p[s->offset]; /* * Expecting three errors. 
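 *
 * (A note on the change above: writing ~p[s->offset] instead of the
 * old constant 0x12 guarantees the free pointer byte visibly changes,
 * so the corruption is caught even if that byte already held 0x12.)
 */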
diff --git a/lib/sort.c b/lib/sort.c index b399bf10d675..a0509088f82a 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -215,6 +215,7 @@ void sort_r(void *base, size_t num, size_t size, /* pre-scale counters for performance */ size_t n = num * size, a = (num/2) * size; const unsigned int lsbit = size & -size; /* Used to find parent */ + size_t shift = 0; if (!a) /* num < 2 || size == 0 */ return; @@ -242,12 +243,21 @@ void sort_r(void *base, size_t num, size_t size, for (;;) { size_t b, c, d; - if (a) /* Building heap: sift down --a */ - a -= size; - else if (n -= size) /* Sorting: Extract root to --n */ + if (a) /* Building heap: sift down a */ + a -= size << shift; + else if (n > 3 * size) { /* Sorting: Extract two largest elements */ + n -= size; do_swap(base, base + n, size, swap_func, priv); - else /* Sort complete */ + shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0; + a = size << shift; + n -= size; + do_swap(base + a, base + n, size, swap_func, priv); + } else if (n > size) { /* Sorting: Extract root */ + n -= size; + do_swap(base, base + n, size, swap_func, priv); + } else { /* Sort complete */ break; + } /* * Sift element at "a" down into heap. This is the @@ -262,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size, * average, 3/4 worst-case.) */ for (b = a; c = 2*b + size, (d = c + size) < n;) - b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d; + b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d; if (d == n) /* Special case last leaf with no sibling */ b = c; diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 4a7055a63d9f..cd8f23455285 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -36,53 +36,11 @@ #include <linux/memblock.h> #include <linux/kasan-enabled.h> -#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8) - -#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */ -#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER)) -#define DEPOT_STACK_ALIGN 4 -#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN) -#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \ - STACK_DEPOT_EXTRA_BITS) #define DEPOT_POOLS_CAP 8192 +/* The pool_index is offset by 1 so the first record does not have a 0 handle. */ #define DEPOT_MAX_POOLS \ - (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \ - (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP) - -/* Compact structure that stores a reference to a stack. */ -union handle_parts { - depot_stack_handle_t handle; - struct { - u32 pool_index : DEPOT_POOL_INDEX_BITS; - u32 offset : DEPOT_OFFSET_BITS; - u32 extra : STACK_DEPOT_EXTRA_BITS; - }; -}; - -struct stack_record { - struct list_head hash_list; /* Links in the hash table */ - u32 hash; /* Hash in hash table */ - u32 size; /* Number of stored frames */ - union handle_parts handle; /* Constant after initialization */ - refcount_t count; - union { - unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */ - struct { - /* - * An important invariant of the implementation is to - * only place a stack record onto the freelist iff its - * refcount is zero. Because stack records with a zero - * refcount are never considered as valid, it is safe to - * union @entries and freelist management state below. - * Conversely, as soon as an entry is off the freelist - * and its refcount becomes non-zero, the below must not - * be accessed until being placed back on the freelist. 
- */ - struct list_head free_list; /* Links in the freelist */ - unsigned long rcu_state; /* RCU cookie */ - }; - }; -}; + (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \ + (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP) static bool stack_depot_disabled; static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); @@ -372,7 +330,7 @@ static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size) stack = current_pool + pool_offset; /* Pre-initialize handle once. */ - stack->handle.pool_index = pool_index; + stack->handle.pool_index_plus_1 = pool_index + 1; stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; stack->handle.extra = 0; INIT_LIST_HEAD(&stack->hash_list); @@ -483,18 +441,19 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle) const int pools_num_cached = READ_ONCE(pools_num); union handle_parts parts = { .handle = handle }; void *pool; + u32 pool_index = parts.pool_index_plus_1 - 1; size_t offset = parts.offset << DEPOT_STACK_ALIGN; struct stack_record *stack; lockdep_assert_not_held(&pool_lock); - if (parts.pool_index > pools_num_cached) { + if (pool_index >= pools_num_cached) { WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", - parts.pool_index, pools_num_cached, handle); + pool_index, pools_num_cached, handle); return NULL; } - pool = stack_pools[parts.pool_index]; + pool = stack_pools[pool_index]; if (WARN_ON(!pool)) return NULL; @@ -668,10 +627,10 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, /* * Zero out zone modifiers, as we don't have specific zone * requirements. Keep the flags related to allocation in atomic - * contexts and I/O. + * contexts, I/O, nolockdep. */ alloc_flags &= ~GFP_ZONEMASK; - alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); + alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP); alloc_flags |= __GFP_NOWARN; page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER); if (page) @@ -728,6 +687,14 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries, } EXPORT_SYMBOL_GPL(stack_depot_save); +struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle) +{ + if (!handle) + return NULL; + + return depot_fetch_stack(handle); +} + unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries) { diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c index dc3c68f46f0a..3bc14d1ee816 100644 --- a/lib/stackinit_kunit.c +++ b/lib/stackinit_kunit.c @@ -417,7 +417,7 @@ static noinline int leaf_switch_2_none(unsigned long sp, bool fill, * These are expected to fail for most configurations because neither * GCC nor Clang have a way to perform initialization of variables in * non-code areas (i.e. in a switch statement before the first "case"). - * https://bugs.llvm.org/show_bug.cgi?id=44916 + * https://llvm.org/pr44916 */ DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL); DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL); diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c deleted file mode 100644 index e21be95514af..000000000000 --- a/lib/strcat_kunit.c +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Kernel module for testing 'strcat' family of functions. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <kunit/test.h> -#include <linux/string.h> - -static volatile int unconst; - -static void strcat_test(struct kunit *test) -{ - char dest[8]; - - /* Destination is terminated. 
*/ - memset(dest, 0, sizeof(dest)); - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy does nothing. */ - KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* 4 characters copied in, stops at %NUL. */ - KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); - KUNIT_EXPECT_STREQ(test, dest, "four"); - KUNIT_EXPECT_EQ(test, dest[5], '\0'); - /* 2 more characters copied in okay. */ - KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); -} - -static void strncat_test(struct kunit *test) -{ - char dest[8]; - - /* Destination is terminated. */ - memset(dest, 0, sizeof(dest)); - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy of size 0 does nothing. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Empty copy of size 1 does nothing too. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Copy of max 0 characters should do nothing. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - - /* 4 characters copied in, even if max is 8. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, "four"); - KUNIT_EXPECT_EQ(test, dest[5], '\0'); - KUNIT_EXPECT_EQ(test, dest[6], '\0'); - /* 2 characters copied in okay, 2 ignored. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); -} - -static void strlcat_test(struct kunit *test) -{ - char dest[8] = ""; - int len = sizeof(dest) + unconst; - - /* Destination is terminated. */ - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy is size 0. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Size 1 should keep buffer terminated, report size of source only. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); - KUNIT_EXPECT_STREQ(test, dest, ""); - - /* 4 characters copied in. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); - KUNIT_EXPECT_STREQ(test, dest, "four"); - /* 2 characters copied in okay, gets to 6 total. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); - /* 2 characters ignored if max size (7) reached. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); - /* 1 of 2 characters skipped, now at true max size. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); - KUNIT_EXPECT_STREQ(test, dest, "fourABE"); - /* Everything else ignored, now at full size. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); - KUNIT_EXPECT_STREQ(test, dest, "fourABE"); -} - -static struct kunit_case strcat_test_cases[] = { - KUNIT_CASE(strcat_test), - KUNIT_CASE(strncat_test), - KUNIT_CASE(strlcat_test), - {} -}; - -static struct kunit_suite strcat_test_suite = { - .name = "strcat", - .test_cases = strcat_test_cases, -}; - -kunit_test_suite(strcat_test_suite); - -MODULE_LICENSE("GPL"); diff --git a/lib/string_kunit.c b/lib/string_kunit.c index eabf025cf77c..2a812decf14b 100644 --- a/lib/string_kunit.c +++ b/lib/string_kunit.c @@ -11,7 +11,13 @@ #include <linux/slab.h> #include <linux/string.h> -static void test_memset16(struct kunit *test) +#define STRCMP_LARGE_BUF_LEN 2048 +#define STRCMP_CHANGE_POINT 1337 +#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) 
KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0) +#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0) +#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0) + +static void string_test_memset16(struct kunit *test) { unsigned i, j, k; u16 v, *p; @@ -40,7 +46,7 @@ static void test_memset16(struct kunit *test) } } -static void test_memset32(struct kunit *test) +static void string_test_memset32(struct kunit *test) { unsigned i, j, k; u32 v, *p; @@ -69,7 +75,7 @@ static void test_memset32(struct kunit *test) } } -static void test_memset64(struct kunit *test) +static void string_test_memset64(struct kunit *test) { unsigned i, j, k; u64 v, *p; @@ -98,7 +104,7 @@ static void test_memset64(struct kunit *test) } } -static void test_strchr(struct kunit *test) +static void string_test_strchr(struct kunit *test) { const char *test_string = "abcdefghijkl"; const char *empty_string = ""; @@ -121,7 +127,7 @@ static void test_strchr(struct kunit *test) KUNIT_ASSERT_NULL(test, result); } -static void test_strnchr(struct kunit *test) +static void string_test_strnchr(struct kunit *test) { const char *test_string = "abcdefghijkl"; const char *empty_string = ""; @@ -154,7 +160,7 @@ static void test_strnchr(struct kunit *test) KUNIT_ASSERT_NULL(test, result); } -static void test_strspn(struct kunit *test) +static void string_test_strspn(struct kunit *test) { static const struct strspn_test { const char str[16]; @@ -179,13 +185,444 @@ static void test_strspn(struct kunit *test) } } +static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN]; +static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN]; + +static void strcmp_fill_buffers(char fill1, char fill2) +{ + memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN); + memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN); + strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0; + strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0; +} + +static void string_test_strcmp(struct kunit *test) +{ + /* Equal strings */ + STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!"); + /* First string is lexicographically less than the second */ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!"); + /* First string is lexicographically larger than the second */ + STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!"); + /* Empty string is always lexicographically less than any non-empty string */ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string"); + /* Two empty strings should be equal */ + STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", ""); + /* Compare two strings which have only one char difference */ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba"); + /* Compare two strings which have the same prefix*/ + STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else"); +} + +static void string_test_strcmp_long_strings(struct kunit *test) +{ + strcmp_fill_buffers('B', 'B'); + STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A'; + STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2); + + strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C'; + STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2); +} + +static void string_test_strncmp(struct kunit *test) +{ + /* Equal strings */ + STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13); + /* First string is lexicographically less than the second */ + 
STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
+ /* Strings with common prefix are equal if count = length of prefix */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
+ /* Strings with common prefix are not equal when count = length of prefix + 1 */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
+ /* If one string is a prefix of another, the shorter string is lexicographically smaller */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string and something else"));
+ /*
+ * If one string is a prefix of another, and we check first length
+ * of prefix chars, the result is 'equal'
+ */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string"));
+}
+
+static void string_test_strncmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+ /* the strings are equal up to STRCMP_CHANGE_POINT */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static void string_test_strcasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
+ /* Empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
+ /* Although the ASCII code for 'a' is larger than that for 'B', 'a' < 'B' */
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
+ /* Special symbols and numbers should be processed correctly */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
+}
+
+static void string_test_strcasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
+ /* strncasecmp should check 'count' chars only */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
+}
+
+static void string_test_strncasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp,
strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+/**
+ * strscpy_check() - Run a specific test case.
+ * @test: KUnit test context pointer
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ * written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void strscpy_check(struct kunit *test, char *src, int count,
+ int expected, int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void string_test_strscpy(struct kunit *test)
+{
+ char dest[8];
+
+ /*
+ * strscpy_check() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+ * overflow). This means we should only call strscpy_check() with
+ * strings up to a maximum of 4 characters long and 'count'
+ * should not exceed 4. To test with longer strings increase
+ * the buffer size in strscpy_check().
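+ *
+ * For example, strscpy_check(test, "ab", 3, 2, 2, 1, 0) below expects
+ * strscpy_pad(buf, "ab", 3) to return 2, to write both source
+ * characters plus one terminating NUL, and to add no padding; the
+ * remaining bytes of buf must still hold the 'z' poison value.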
+ */ + + /* strscpy_check(test, src, count, expected, chars, terminator, pad) */ + strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0); + strscpy_check(test, "", 0, -E2BIG, 0, 0, 0); + + strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0); + strscpy_check(test, "", 1, 0, 0, 1, 0); + + strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0); + strscpy_check(test, "a", 2, 1, 1, 1, 0); + strscpy_check(test, "", 2, 0, 0, 1, 1); + + strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0); + strscpy_check(test, "ab", 3, 2, 2, 1, 0); + strscpy_check(test, "a", 3, 1, 1, 1, 1); + strscpy_check(test, "", 3, 0, 0, 1, 2); + + strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0); + strscpy_check(test, "abc", 4, 3, 3, 1, 0); + strscpy_check(test, "ab", 4, 2, 2, 1, 1); + strscpy_check(test, "a", 4, 1, 1, 1, 2); + strscpy_check(test, "", 4, 0, 0, 1, 3); + + /* Compile-time-known source strings. */ + KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG); +} + +static volatile int unconst; + +static void string_test_strcat(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy does nothing. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* 4 characters copied in, stops at %NUL. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + /* 2 more characters copied in okay. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void string_test_strncat(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy of size 0 does nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Empty copy of size 1 does nothing too. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Copy of max 0 characters should do nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in, even if max is 8. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + KUNIT_EXPECT_EQ(test, dest[6], '\0'); + /* 2 characters copied in okay, 2 ignored. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void string_test_strlcat(struct kunit *test) +{ + char dest[8] = ""; + int len = sizeof(dest) + unconst; + + /* Destination is terminated. */ + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy is size 0. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Size 1 should keep buffer terminated, report size of source only. 
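+ * (In general strlcat() returns strlen(dst) + strlen(src), the size of
+ * the string it tried to create, which is why the expected values
+ * below grow to 8, 9 and 11 while dest itself stays capped at 7 chars.)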
*/ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); + KUNIT_EXPECT_STREQ(test, dest, "four"); + /* 2 characters copied in okay, gets to 6 total. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 2 characters ignored if max size (7) reached. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 1 of 2 characters skipped, now at true max size. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); + /* Everything else ignored, now at full size. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); +} + +static void string_test_strtomem(struct kunit *test) +{ + static const char input[sizeof(unsigned long)] = "hi"; + static const char truncate[] = "this is too long"; + struct { + unsigned long canary1; + unsigned char output[sizeof(unsigned long)] __nonstring; + unsigned long canary2; + } wrap; + + memset(&wrap, 0xFF, sizeof(wrap)); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, + "bad initial canary value"); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, + "bad initial canary value"); + + /* Check unpadded copy leaves surroundings untouched. */ + strtomem(wrap.output, input); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated copy leaves surroundings untouched. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem(wrap.output, truncate); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + for (size_t i = 0; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check padded copy leaves only string padded. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem_pad(wrap.output, input, 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated padded copy has no padding. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem(wrap.output, truncate); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + for (size_t i = 0; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); +} + + +static void string_test_memtostr(struct kunit *test) +{ + char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' }; + char nonstring_small[3] = { 'a', 'b', 'c' }; + char dest[sizeof(nonstring) + 1]; + + /* Copy in a non-NUL-terminated string into exactly right-sized dest. 
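+ * (memtostr() copies at most sizeof(dest) - 1 bytes from the source
+ * array and always NUL-terminates; memtostr_pad() also zero-fills the
+ * rest of dest. That is why dest[7] keeps its 'X' after the short
+ * memtostr() copy below, but is '\0' after memtostr_pad().)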
*/ + KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], 'X'); + + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], '\0'); +} + static struct kunit_case string_test_cases[] = { - KUNIT_CASE(test_memset16), - KUNIT_CASE(test_memset32), - KUNIT_CASE(test_memset64), - KUNIT_CASE(test_strchr), - KUNIT_CASE(test_strnchr), - KUNIT_CASE(test_strspn), + KUNIT_CASE(string_test_memset16), + KUNIT_CASE(string_test_memset32), + KUNIT_CASE(string_test_memset64), + KUNIT_CASE(string_test_strchr), + KUNIT_CASE(string_test_strnchr), + KUNIT_CASE(string_test_strspn), + KUNIT_CASE(string_test_strcmp), + KUNIT_CASE(string_test_strcmp_long_strings), + KUNIT_CASE(string_test_strncmp), + KUNIT_CASE(string_test_strncmp_long_strings), + KUNIT_CASE(string_test_strcasecmp), + KUNIT_CASE(string_test_strcasecmp_long_strings), + KUNIT_CASE(string_test_strncasecmp), + KUNIT_CASE(string_test_strncasecmp_long_strings), + KUNIT_CASE(string_test_strscpy), + KUNIT_CASE(string_test_strcat), + KUNIT_CASE(string_test_strncat), + KUNIT_CASE(string_test_strlcat), + KUNIT_CASE(string_test_strtomem), + KUNIT_CASE(string_test_memtostr), {} }; diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c deleted file mode 100644 index a6b6344354ed..000000000000 --- a/lib/strscpy_kunit.c +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Kernel module for testing 'strscpy' family of functions. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <kunit/test.h> -#include <linux/string.h> - -/* - * tc() - Run a specific test case. - * @src: Source string, argument to strscpy_pad() - * @count: Size of destination buffer, argument to strscpy_pad() - * @expected: Expected return value from call to strscpy_pad() - * @terminator: 1 if there should be a terminating null byte 0 otherwise. - * @chars: Number of characters from the src string expected to be - * written to the dst buffer. - * @pad: Number of pad characters expected (in the tail of dst buffer). - * (@pad does not include the null terminator byte.) - * - * Calls strscpy_pad() and verifies the return value and state of the - * destination buffer after the call returns. - */ -static void tc(struct kunit *test, char *src, int count, int expected, - int chars, int terminator, int pad) -{ - int nr_bytes_poison; - int max_expected; - int max_count; - int written; - char buf[6]; - int index, i; - const char POISON = 'z'; - - KUNIT_ASSERT_TRUE_MSG(test, src != NULL, - "null source string not supported"); - - memset(buf, POISON, sizeof(buf)); - /* Future proofing test suite, validate args */ - max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */ - max_expected = count - 1; /* Space for the null */ - - KUNIT_ASSERT_LE_MSG(test, count, max_count, - "count (%d) is too big (%d) ... 
aborting", count, max_count); - KUNIT_EXPECT_LE_MSG(test, expected, max_expected, - "expected (%d) is bigger than can possibly be returned (%d)", - expected, max_expected); - - written = strscpy_pad(buf, src, count); - KUNIT_ASSERT_EQ(test, written, expected); - - if (count && written == -E2BIG) { - KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1), - "buffer state invalid for -E2BIG"); - KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', - "too big string is not null terminated correctly"); - } - - for (i = 0; i < chars; i++) - KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i], - "buf[i]==%c != src[i]==%c", buf[i], src[i]); - - if (terminator) - KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', - "string is not null terminated correctly"); - - for (i = 0; i < pad; i++) { - index = chars + terminator + i; - KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0', - "padding missing at index: %d", i); - } - - nr_bytes_poison = sizeof(buf) - chars - terminator - pad; - for (i = 0; i < nr_bytes_poison; i++) { - index = sizeof(buf) - 1 - i; /* Check from the end back */ - KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON, - "poison value missing at index: %d", i); - } -} - -static void strscpy_test(struct kunit *test) -{ - char dest[8]; - - /* - * tc() uses a destination buffer of size 6 and needs at - * least 2 characters spare (one for null and one to check for - * overflow). This means we should only call tc() with - * strings up to a maximum of 4 characters long and 'count' - * should not exceed 4. To test with longer strings increase - * the buffer size in tc(). - */ - - /* tc(test, src, count, expected, chars, terminator, pad) */ - tc(test, "a", 0, -E2BIG, 0, 0, 0); - tc(test, "", 0, -E2BIG, 0, 0, 0); - - tc(test, "a", 1, -E2BIG, 0, 1, 0); - tc(test, "", 1, 0, 0, 1, 0); - - tc(test, "ab", 2, -E2BIG, 1, 1, 0); - tc(test, "a", 2, 1, 1, 1, 0); - tc(test, "", 2, 0, 0, 1, 1); - - tc(test, "abc", 3, -E2BIG, 2, 1, 0); - tc(test, "ab", 3, 2, 2, 1, 0); - tc(test, "a", 3, 1, 1, 1, 1); - tc(test, "", 3, 0, 0, 1, 2); - - tc(test, "abcd", 4, -E2BIG, 3, 1, 0); - tc(test, "abc", 4, 3, 3, 1, 0); - tc(test, "ab", 4, 2, 2, 1, 1); - tc(test, "a", 4, 1, 1, 1, 2); - tc(test, "", 4, 0, 0, 1, 3); - - /* Compile-time-known source strings. */ - KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG); -} - -static struct kunit_case strscpy_test_cases[] = { - KUNIT_CASE(strscpy_test), - {} -}; - -static struct kunit_suite strscpy_test_suite = { - .name = "strscpy", - .test_cases = strscpy_test_cases, -}; - -kunit_test_suite(strscpy_test_suite); - -MODULE_AUTHOR("Tobin C. 
Harding <tobin@kernel.org>"); -MODULE_LICENSE("GPL"); diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 6b2b33579f56..83019beabce4 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -60,18 +60,17 @@ static const unsigned long exp3_1_0[] __initconst = { }; static bool __init -__check_eq_uint(const char *srcfile, unsigned int line, - const unsigned int exp_uint, unsigned int x) +__check_eq_ulong(const char *srcfile, unsigned int line, + const unsigned long exp_ulong, unsigned long x) { - if (exp_uint != x) { - pr_err("[%s:%u] expected %u, got %u\n", - srcfile, line, exp_uint, x); + if (exp_ulong != x) { + pr_err("[%s:%u] expected %lu, got %lu\n", + srcfile, line, exp_ulong, x); return false; } return true; } - static bool __init __check_eq_bitmap(const char *srcfile, unsigned int line, const unsigned long *exp_bmap, const unsigned long *bmap, @@ -185,7 +184,8 @@ __check_eq_str(const char *srcfile, unsigned int line, result; \ }) -#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__) +#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__) +#define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y)) #define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__) #define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) #define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__) @@ -548,7 +548,7 @@ static void __init test_bitmap_parselist(void) } if (ptest.flags & PARSE_TIME) - pr_err("parselist: %d: input is '%s' OK, Time: %llu\n", + pr_info("parselist: %d: input is '%s' OK, Time: %llu\n", i, ptest.in, time); #undef ptest @@ -587,7 +587,7 @@ static void __init test_bitmap_printlist(void) goto out; } - pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); + pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); out: kfree(buf); kfree(bmap); @@ -665,7 +665,7 @@ static void __init test_bitmap_parse(void) } if (test.flags & PARSE_TIME) - pr_err("parse: %d: input is '%s' OK, Time: %llu\n", + pr_info("parse: %d: input is '%s' OK, Time: %llu\n", i, test.in, time); } } @@ -1245,14 +1245,7 @@ static void __init test_bitmap_const_eval(void) * in runtime. */ - /* - * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`. - * Clang on s390 optimizes bitops at compile-time as intended, but at - * the same time stops treating @bitmap and @bitopvar as compile-time - * constants after regular test_bit() is executed, thus triggering the - * build bugs below. So, call const_test_bit() there directly until - * the compiler is fixed. - */ + /* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */ bitmap_clear(bitmap, 0, BITS_PER_LONG); if (!test_bit(7, bitmap)) bitmap_set(bitmap, 5, 2); @@ -1284,8 +1277,179 @@ static void __init test_bitmap_const_eval(void) /* ~BIT(25) */ BUILD_BUG_ON(!__builtin_constant_p(~var)); BUILD_BUG_ON(~var != ~BIT(25)); + + /* ~BIT(25) | BIT(25) == ~0UL */ + bitmap_complement(&var, &var, BITS_PER_LONG); + __assign_bit(25, &var, true); + + /* !(~(~0UL)) == 1 */ + res = bitmap_full(&var, BITS_PER_LONG); + BUILD_BUG_ON(!__builtin_constant_p(res)); + BUILD_BUG_ON(!res); +} + +/* + * Test bitmap should be big enough to include the cases when start is not in + * the first word, and start+nbits lands in the following word. + */ +#define TEST_BIT_LEN (1000) + +/* + * Helper function to test bitmap_write() overwriting the chosen byte pattern. 
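+ *
+ * (Here bitmap_write(map, val, start, nbits) stores the nbits least
+ * significant bits of val at bit offset start, and bitmap_read(map,
+ * start, nbits) reads them back; spans wider than BITS_PER_LONG are
+ * not supported.)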
+ */
+static void __init test_bitmap_write_helper(const char *pattern)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN);
+ unsigned long w, r, bit;
+ int i, n, nbits;
+
+ /*
+ * Only parse the pattern once and store the result in the intermediate
+ * bitmap.
+ */
+ bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN);
+
+ /*
+ * Check that writing a single bit does not accidentally touch the
+ * adjacent bits.
+ */
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (bit = 0; bit <= 1; bit++) {
+ bitmap_write(bitmap, bit, i, 1);
+ __assign_bit(i, exp_bitmap, bit);
+ expect_eq_bitmap(exp_bitmap, bitmap,
+ TEST_BIT_LEN);
+ }
+ }
+
+ /* Ensure writing 0 bits does not change anything. */
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_write(bitmap, ~0UL, i, 0);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ }
+
+ for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) {
+ w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL
+ : 0xdeadbeefUL;
+ w >>= (BITS_PER_LONG - nbits);
+ for (i = 0; i <= TEST_BIT_LEN - nbits; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (n = 0; n < nbits; n++)
+ __assign_bit(i + n, exp_bitmap, w & BIT(n));
+ bitmap_write(bitmap, w, i, nbits);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ r = bitmap_read(bitmap, i, nbits);
+ expect_eq_ulong(r, w);
+ }
+ }
+}
+
+static void __init test_bitmap_read_write(void)
+{
+ unsigned char *pattern[3] = {"", "all:1/2", "all"};
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG;
+ unsigned long val;
+ int i, pi;
+
+ /*
+ * Reading/writing zero bits should not crash the kernel.
+ * READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits));
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(zero_bits));
+
+ /*
+ * Reading/writing more than BITS_PER_LONG bits should not crash the
+ * kernel. READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1);
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1);
+
+ /*
+ * Ensure that bitmap_read() reads the same value that was previously
+ * written, and two consecutive values are correctly merged.
+ * The resulting bit pattern is asymmetric to rule out possible issues
+ * with bit numeration order.
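The merge check described above is easy to model outside the kernel. Here is a single-word userspace sketch of the `bitmap_write()`/`bitmap_read()` semantics (an assumption for illustration only: the real helpers cross word boundaries and clamp nbits, which this toy does not):

```c
#include <assert.h>
#include <stdio.h>

#define BITS (sizeof(unsigned long) * 8)

/* Write the low nbits of val at offset start; other bits are preserved. */
static void bm_write(unsigned long *map, unsigned long val,
		     unsigned int start, unsigned int nbits)
{
	unsigned long mask;

	if (!nbits)
		return;	/* writing zero bits must be a no-op */
	mask = (nbits < BITS) ? ((1UL << nbits) - 1) : ~0UL;
	*map = (*map & ~(mask << start)) | ((val & mask) << start);
}

static unsigned long bm_read(const unsigned long *map,
			     unsigned int start, unsigned int nbits)
{
	unsigned long mask;

	if (!nbits)
		return 0;
	mask = (nbits < BITS) ? ((1UL << nbits) - 1) : ~0UL;
	return (*map >> start) & mask;
}

int main(void)
{
	unsigned long map = 0;

	bm_write(&map, 0x15, 3, 5);	/* 0b10101 at offset 3 */
	bm_write(&map, 0x05, 8, 3);	/* 0b101 right above it */
	/* The two writes merge into the asymmetric 0b10110101. */
	assert(bm_read(&map, 3, 8) == 0xb5);
	printf("merged byte: 0x%lx\n", bm_read(&map, 3, 8));
	return 0;
}
```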
+ */ + for (i = 0; i < TEST_BIT_LEN - 7; i++) { + bitmap_zero(bitmap, TEST_BIT_LEN); + + bitmap_write(bitmap, 0b10101UL, i, 5); + val = bitmap_read(bitmap, i, 5); + expect_eq_ulong(0b10101UL, val); + + bitmap_write(bitmap, 0b101UL, i + 5, 3); + val = bitmap_read(bitmap, i + 5, 3); + expect_eq_ulong(0b101UL, val); + + val = bitmap_read(bitmap, i, 8); + expect_eq_ulong(0b10110101UL, val); + } + + for (pi = 0; pi < ARRAY_SIZE(pattern); pi++) + test_bitmap_write_helper(pattern[pi]); } +static void __init test_bitmap_read_perf(void) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned int cnt, nbits, i; + unsigned long val; + ktime_t time; + + bitmap_fill(bitmap, TEST_BIT_LEN); + time = ktime_get(); + for (cnt = 0; cnt < 5; cnt++) { + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { + for (i = 0; i < TEST_BIT_LEN; i++) { + if (i + nbits > TEST_BIT_LEN) + break; + /* + * Prevent the compiler from optimizing away the + * bitmap_read() by using its value. + */ + WRITE_ONCE(val, bitmap_read(bitmap, i, nbits)); + } + } + } + time = ktime_get() - time; + pr_info("Time spent in %s:\t%llu\n", __func__, time); +} + +static void __init test_bitmap_write_perf(void) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned int cnt, nbits, i; + unsigned long val = 0xfeedface; + ktime_t time; + + bitmap_zero(bitmap, TEST_BIT_LEN); + time = ktime_get(); + for (cnt = 0; cnt < 5; cnt++) { + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { + for (i = 0; i < TEST_BIT_LEN; i++) { + if (i + nbits > TEST_BIT_LEN) + break; + bitmap_write(bitmap, val, i, nbits); + } + } + } + time = ktime_get() - time; + pr_info("Time spent in %s:\t%llu\n", __func__, time); +} + +#undef TEST_BIT_LEN + static void __init selftest(void) { test_zero_clear(); @@ -1303,6 +1467,9 @@ static void __init selftest(void) test_bitmap_cut(); test_bitmap_print_buf(); test_bitmap_const_eval(); + test_bitmap_read_write(); + test_bitmap_read_perf(); + test_bitmap_write_perf(); test_find_nth_bit(); test_for_each_set_bit(); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 569e6d2dc55c..207ff87194db 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -13431,7 +13431,7 @@ static struct bpf_test tests[] = { .stack_depth = 8, .nr_testruns = NR_PATTERN_RUNS, }, - /* 64-bit atomic magnitudes */ + /* 32-bit atomic magnitudes */ { "ATOMIC_W_ADD: all operand magnitudes", { }, diff --git a/lib/test_kmod.c b/lib/test_kmod.c index 43d9dfd57ab7..1eec3b7ac67c 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -58,11 +58,14 @@ static int num_test_devs; * @need_mod_put for your tests case. 
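Both perf loops above funnel the result through WRITE_ONCE() so the compiler cannot prove the timed bitmap_read() dead and delete it. The same trick in userspace uses a volatile sink; a sketch under that assumption (workload and constants are stand-ins):

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* A volatile sink plays the role of WRITE_ONCE(): every store is an
 * observable side effect, so the measured work cannot be elided. */
static volatile unsigned long sink;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t t0 = now_ns();

	for (unsigned long i = 0; i < 10000000; i++)
		sink = i * 2654435761UL;	/* stand-in workload */

	printf("elapsed: %llu ns\n",
	       (unsigned long long)(now_ns() - t0));
	return 0;
}
```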
*/ enum kmod_test_case { + /* private: */ __TEST_KMOD_INVALID = 0, + /* public: */ TEST_KMOD_DRIVER, TEST_KMOD_FS_TYPE, + /* private: */ __TEST_KMOD_MAX, }; @@ -82,6 +85,7 @@ struct kmod_test_device; * @ret_sync: return value if request_module() is used, sync request for * @TEST_KMOD_DRIVER * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE + * @task_sync: kthread's task_struct or %NULL if not running * @thread_idx: thread ID * @test_dev: test device test is being performed under * @need_mod_put: Some tests (get_fs_type() is one) requires putting the module @@ -108,7 +112,7 @@ struct kmod_test_device_info { * @dev: pointer to misc_dev's own struct device * @config_mutex: protects configuration of test * @trigger_mutex: the test trigger can only be fired once at a time - * @thread_lock: protects @done count, and the @info per each thread + * @thread_mutex: protects @done count, and the @info per each thread * @done: number of threads which have completed or failed * @test_is_oom: when we run out of memory, use this to halt moving forward * @kthreads_done: completion used to signal when all work is done diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c index 276c12140ee2..c288df9372ed 100644 --- a/lib/test_ubsan.c +++ b/lib/test_ubsan.c @@ -134,7 +134,7 @@ static const test_ubsan_fp test_ubsan_array[] = { }; /* Excluded because they Oops the module. */ -static const test_ubsan_fp skip_ubsan_array[] = { +static __used const test_ubsan_fp skip_ubsan_array[] = { test_ubsan_divrem_overflow, }; diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 3718d9886407..4ddf769861ff 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -117,7 +117,7 @@ static int align_shift_alloc_test(void) int i; for (i = 0; i < BITS_PER_LONG; i++) { - align = ((unsigned long) 1) << i; + align = 1UL << i; ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0, __builtin_return_address(0)); @@ -501,7 +501,7 @@ static int test_func(void *private) } static int -init_test_configurtion(void) +init_test_configuration(void) { /* * A maximum number of workers is defined as hard-coded @@ -531,7 +531,7 @@ static void do_concurrent_test(void) /* * Set some basic configurations plus sanity check. 
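One detail worth unpacking from the hunks above: test_ubsan.c tags its skip list `__used`, because an otherwise-unreferenced static array is dead data the toolchain may discard. A userspace sketch of the effect (`__attribute__((used))` is the GCC/Clang spelling behind the kernel's `__used`; the names here are illustrative):

```c
#include <stdio.h>

typedef void (*test_fn)(void);

static void divrem_overflow_case(void)
{
	/* In the module this case would Oops, hence it is only listed. */
}

/* Nothing reads this array, yet 'used' keeps it (and the function it
 * references) in the object file, visible to nm or objdump. */
static test_fn skip_list[] __attribute__((used)) = {
	divrem_overflow_case,
};

int main(void)
{
	puts("skip_list survives dead-data elimination thanks to 'used'");
	return 0;
}
```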
*/
- ret = init_test_configurtion();
+ ret = init_test_configuration();
if (ret < 0)
return;
@@ -600,12 +600,7 @@ static int vmalloc_test_init(void)
return -EAGAIN; /* Fail will directly unload the module */
}
-static void vmalloc_test_exit(void)
-{
-}
-
module_init(vmalloc_test_init)
-module_exit(vmalloc_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Uladzislau Rezki");
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index e77d4856442c..928fc20337e6 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -423,6 +423,59 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_cmpxchg_order(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ void *FIVE = xa_mk_value(5);
+ unsigned int i, order = 3;
+
+ XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));
+
+ /* Check entry FIVE has the order saved */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);
+
+ /* Check all the tied indexes have the same entry and order */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Ensure that nothing is stored at index '1 << order' */
+ XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);
+
+ /*
+ * Additionally, keep the node information and the order at
+ * '1 << order'
+ */
+ XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Conditionally replace FIVE entry at index '0' with NULL */
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);
+
+ /* Verify the order is lost at FIVE (and old) entries */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);
+
+ /* Verify the order and entries are lost in all the tied indexes */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+ }
+
+ /* Verify node and order are kept at '1 << order' */
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
@@ -674,6 +727,186 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}
+#ifdef CONFIG_XARRAY_MULTI
+/* mimics page cache __filemap_add_folio() */
+static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
+ unsigned long index,
+ unsigned int order,
+ void *p)
+{
+ XA_STATE(xas, xa, index);
+ unsigned int nrpages = 1UL << order;
+
+ /* users are responsible for index alignment to the order when adding */
+ XA_BUG_ON(xa, index & (nrpages - 1));
+
+ xas_set_order(&xas, index, order);
+
+ do {
+ xas_lock_irq(&xas);
+ xas_store(&xas, p);
+ xas_unlock_irq(&xas);
+ /*
+ * In our selftest case the only failure we can expect is for
+ * there not to be enough memory as we're not mimicking the
+ * entire page cache, so verify that's the only error we can run
+ * into here. The xas_nomem() which follows will fix that
+ * condition for us, so the loop can chug on.
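check_xa_multi_store_adv_add() opens by asserting that the caller aligned the index to the order via `index & (nrpages - 1)`. That mask test works for any power-of-two span; a standalone restatement (illustrative names):

```c
#include <assert.h>
#include <stdio.h>

/* For nrpages = 1UL << order, an index is order-aligned exactly when
 * its low 'order' bits are clear, i.e. (index & (nrpages - 1)) == 0. */
static int order_aligned(unsigned long index, unsigned int order)
{
	unsigned long nrpages = 1UL << order;

	return (index & (nrpages - 1)) == 0;
}

int main(void)
{
	assert(order_aligned(0, 3));	/* 0 is aligned to any order */
	assert(order_aligned(16, 3));	/* 16 = 2 * 8 */
	assert(!order_aligned(12, 3));	/* low bits 100 set: misaligned */
	printf("alignment checks passed\n");
	return 0;
}
```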
+ */
+ XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, xa_load(xa, index) != p);
+}
+
+/* mimics page_cache_delete() */
+static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+
+ xas_set_order(&xas, index, order);
+ xas_store(&xas, NULL);
+ xas_init_marks(&xas);
+}
+
+static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ xa_lock_irq(xa);
+ check_xa_multi_store_adv_del_entry(xa, index, order);
+ xa_unlock_irq(xa);
+}
+
+/* mimics page cache filemap_get_entry() */
+static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *p;
+ static unsigned int loops = 0;
+
+ rcu_read_lock();
+repeat:
+ xas_reset(&xas);
+ p = xas_load(&xas);
+ if (xas_retry(&xas, p))
+ goto repeat;
+ rcu_read_unlock();
+
+ /*
+ * This is not part of the page cache, this selftest is pretty
+ * aggressive and does not want to trust the xarray API but rather
+ * test it, and for order 20 (4 GiB block size) we can loop over
+ * more than a million entries, which can cause a soft lockup. Proper
+ * page cache APIs loop over the stored order, so when using a larger
+ * order they skip the shared sibling entries.
+ */
+ if (++loops % XA_CHECK_SCHED == 0)
+ schedule();
+
+ return p;
+}
+
+static unsigned long some_val = 0xdeadbeef;
+static unsigned long some_val_2 = 0xdeaddead;
+
+/* mimics the page cache usage */
+static noinline void check_xa_multi_store_adv(struct xarray *xa,
+ unsigned long pos,
+ unsigned int order)
+{
+ unsigned int nrpages = 1UL << order;
+ unsigned long index, base, next_index, next_next_index;
+ unsigned int i;
+
+ index = pos >> PAGE_SHIFT;
+ base = round_down(index, nrpages);
+ next_index = round_down(base + nrpages, nrpages);
+ next_next_index = round_down(next_index + nrpages, nrpages);
+
+ check_xa_multi_store_adv_add(xa, base, order, &some_val);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);
+
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);
+
+ /* Use order 0 for the next item */
+ check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);
+
+ /* Remove the next item */
+ check_xa_multi_store_adv_delete(xa, next_index, 0);
+
+ /* Now use order for a new pointer */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ check_xa_multi_store_adv_delete(xa, base, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* starting fresh again */
+
+ /* let's test some holes now */
+
+ /* hole at base and next_next */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* hole at base and next */
+
+ check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);
+
+ for (i = 0; i <
nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL); + + for (i = 0; i < nrpages; i++) + XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); + + check_xa_multi_store_adv_delete(xa, next_next_index, order); + XA_BUG_ON(xa, !xa_empty(xa)); +} +#endif + +static noinline void check_multi_store_advanced(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; + unsigned long end = ULONG_MAX/2; + unsigned long pos, i; + + /* + * About 117 million tests below. + */ + for (pos = 7; pos < end; pos = (pos * pos) + 564) { + for (i = 0; i < max_order; i++) { + check_xa_multi_store_adv(xa, pos, i); + check_xa_multi_store_adv(xa, pos + 157, i); + } + } +#endif +} + static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) { int i; @@ -1555,9 +1788,11 @@ static void check_split_1(struct xarray *xa, unsigned long index, unsigned int order, unsigned int new_order) { XA_STATE_ORDER(xas, xa, index, new_order); - unsigned int i; + unsigned int i, found; + void *entry; xa_store_order(xa, index, order, xa, GFP_KERNEL); + xa_set_mark(xa, index, XA_MARK_1); xas_split_alloc(&xas, xa, order, GFP_KERNEL); xas_lock(&xas); @@ -1574,6 +1809,16 @@ static void check_split_1(struct xarray *xa, unsigned long index, xa_set_mark(xa, index, XA_MARK_0); XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); + xas_set_order(&xas, index, 0); + found = 0; + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) { + found++; + XA_BUG_ON(xa, xa_is_internal(entry)); + } + rcu_read_unlock(); + XA_BUG_ON(xa, found != 1 << (order - new_order)); + xa_destroy(xa); } @@ -1801,9 +2046,11 @@ static int xarray_checks(void) check_xas_erase(&array); check_insert(&array); check_cmpxchg(&array); + check_cmpxchg_order(&array); check_reserve(&array); check_reserve(&xa0); check_multi_store(&array); + check_multi_store_advanced(&array); check_get_order(&array); check_xa_alloc(); check_find(&array); diff --git a/lib/ubsan.c b/lib/ubsan.c index 5fc107f61934..a1c983d148f1 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -44,9 +44,10 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type) case ubsan_shift_out_of_bounds: return "UBSAN: shift out of bounds"; #endif -#ifdef CONFIG_UBSAN_DIV_ZERO +#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP) /* - * SanitizerKind::IntegerDivideByZero emits + * SanitizerKind::IntegerDivideByZero and + * SanitizerKind::SignedIntegerOverflow emit * SanitizerHandler::DivremOverflow. */ case ubsan_divrem_overflow: @@ -78,6 +79,19 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type) case ubsan_type_mismatch: return "UBSAN: type mismatch"; #endif +#ifdef CONFIG_UBSAN_SIGNED_WRAP + /* + * SanitizerKind::SignedIntegerOverflow emits + * SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow, + * or SanitizerHandler::MulOverflow. 
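The ubsan.c hunk continuing below maps the signed-overflow handlers to readable strings. The conditions those handlers fire on can be reproduced deterministically in userspace with the GCC/Clang checked-arithmetic builtins (an illustration of the arithmetic only, not the kernel's handler path):

```c
#include <limits.h>
#include <stdio.h>

int main(void)
{
	int sum, diff, prod;

	/* Each builtin returns true when the signed result wrapped. */
	if (__builtin_add_overflow(INT_MAX, 1, &sum))
		puts("UBSAN: integer addition overflow");
	if (__builtin_sub_overflow(INT_MIN, 1, &diff))
		puts("UBSAN: integer subtraction overflow");
	if (__builtin_mul_overflow(INT_MAX, 2, &prod))
		puts("UBSAN: integer multiplication overflow");
	return 0;
}
```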
+ */ + case ubsan_add_overflow: + return "UBSAN: integer addition overflow"; + case ubsan_sub_overflow: + return "UBSAN: integer subtraction overflow"; + case ubsan_mul_overflow: + return "UBSAN: integer multiplication overflow"; +#endif default: return "UBSAN: unrecognized failure code"; } diff --git a/lib/ubsan.h b/lib/ubsan.h index 0abbbac8700d..07e37d4429b4 100644 --- a/lib/ubsan.h +++ b/lib/ubsan.h @@ -43,7 +43,7 @@ enum { struct type_descriptor { u16 type_kind; u16 type_info; - char type_name[1]; + char type_name[]; }; struct source_location { @@ -124,19 +124,32 @@ typedef s64 s_max; typedef u64 u_max; #endif -void __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs); -void __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs); -void __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs); -void __ubsan_handle_negate_overflow(void *_data, void *old_val); -void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs); -void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr); -void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr); -void __ubsan_handle_out_of_bounds(void *_data, void *index); -void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs); -void __ubsan_handle_builtin_unreachable(void *_data); -void __ubsan_handle_load_invalid_value(void *_data, void *val); -void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, - unsigned long align, - unsigned long offset); +/* + * When generating Runtime Calls, Clang doesn't respect the -mregparm=3 + * option used on i386: https://github.com/llvm/llvm-project/issues/89670 + * Fix this for earlier Clang versions by forcing the calling convention + * to use non-register arguments. + */ +#if defined(CONFIG_X86_32) && \ + defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000 +# define ubsan_linkage asmlinkage +#else +# define ubsan_linkage +#endif + +void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val); +void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr); +void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr); +void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index); +void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs); +void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data); +void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val); +void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, + unsigned long align, + unsigned long offset); #endif diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig index d883ac299508..c46c2300517c 100644 --- a/lib/vdso/Kconfig +++ b/lib/vdso/Kconfig @@ -30,4 +30,11 @@ config GENERIC_VDSO_TIME_NS Selected by architectures which support time namespaces in the VDSO +config GENERIC_VDSO_OVERFLOW_PROTECT + bool + help + Select to add multiplication overflow protection to the VDSO + time getter functions for the price of an extra conditional + in the hotpath. 
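That help text captures the whole trade-off: one extra compare guards the fast path, and only oversized deltas take a widening multiply. A userspace model of the two paths that the gettimeofday.c hunk below wires up as vdso_calc_ns() (assumptions: `unsigned __int128` stands in for the kernel's mul_u64_u32_add_u64_shr(), and the constants are made up to force the wrap):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t calc_ns(uint64_t delta, uint32_t mult, uint64_t base,
			uint32_t shift, uint64_t max_cycles)
{
	if (delta < max_cycles)			/* fast path: fits in 64 bits */
		return (delta * mult + base) >> shift;

	/* slow path: widen the multiply before shifting down */
	return (uint64_t)(((unsigned __int128)delta * mult + base) >> shift);
}

int main(void)
{
	/* A delta large enough that delta * mult wraps 64-bit arithmetic. */
	uint64_t delta = 1ULL << 40;
	uint32_t mult = 1u << 26, shift = 10;

	printf("unguarded fast path: %llu\n",
	       (unsigned long long)((delta * mult) >> shift));
	printf("guarded path:        %llu\n",
	       (unsigned long long)calc_ns(delta, mult, 0, shift, 1ULL << 32));
	return 0;
}
```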
+ endif diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index ce2f69552003..899850bd6f0b 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -5,15 +5,23 @@ #include <vdso/datapage.h> #include <vdso/helpers.h> -#ifndef vdso_calc_delta -/* - * Default implementation which works for all sane clocksources. That - * obviously excludes x86/TSC. - */ -static __always_inline -u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +#ifndef vdso_calc_ns + +#ifdef VDSO_DELTA_NOMASK +# define VDSO_DELTA_MASK(vd) ULLONG_MAX +#else +# define VDSO_DELTA_MASK(vd) (vd->mask) +#endif + +#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT +static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta) +{ + return delta < vd->max_cycles; +} +#else +static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta) { - return ((cycles - last) & mask) * mult; + return true; } #endif @@ -24,6 +32,21 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift) } #endif +/* + * Default implementation which works for all sane clocksources. That + * obviously excludes x86/TSC. + */ +static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base) +{ + u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd); + + if (likely(vdso_delta_ok(vd, delta))) + return vdso_shift_ns((delta * vd->mult) + base, vd->shift); + + return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift); +} +#endif /* vdso_calc_ns */ + #ifndef __arch_vdso_hres_capable static inline bool __arch_vdso_hres_capable(void) { @@ -49,10 +72,10 @@ static inline bool vdso_cycles_ok(u64 cycles) static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, struct __kernel_timespec *ts) { - const struct vdso_data *vd; const struct timens_offset *offs = &vdns->offset[clk]; const struct vdso_timestamp *vdso_ts; - u64 cycles, last, ns; + const struct vdso_data *vd; + u64 cycles, ns; u32 seq; s64 sec; @@ -73,10 +96,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_ cycles = __arch_get_hw_counter(vd->clock_mode, vd); if (unlikely(!vdso_cycles_ok(cycles))) return -1; - ns = vdso_ts->nsec; - last = vd->cycle_last; - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); - ns = vdso_shift_ns(ns, vd->shift); + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec); sec = vdso_ts->sec; } while (unlikely(vdso_read_retry(vd, seq))); @@ -111,7 +131,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, struct __kernel_timespec *ts) { const struct vdso_timestamp *vdso_ts = &vd->basetime[clk]; - u64 cycles, last, sec, ns; + u64 cycles, sec, ns; u32 seq; /* Allows to compile the high resolution parts out */ @@ -144,10 +164,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, cycles = __arch_get_hw_counter(vd->clock_mode, vd); if (unlikely(!vdso_cycles_ok(cycles))) return -1; - ns = vdso_ts->nsec; - last = vd->cycle_last; - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); - ns = vdso_shift_ns(ns, vd->shift); + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec); sec = vdso_ts->sec; } while (unlikely(vdso_read_retry(vd, seq))); diff --git a/lib/xarray.c b/lib/xarray.c index 39f07bfc4dcc..5e7d6334d70d 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -969,8 +969,22 @@ static unsigned int node_get_marks(struct xa_node *node, unsigned int offset) return marks; } +static inline void node_mark_slots(struct xa_node *node, unsigned int sibs, + xa_mark_t mark) +{ + int i; + + if 
(sibs == 0) + node_mark_all(node, mark); + else { + for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1) + node_set_mark(node, i, mark); + } +} + static void node_set_marks(struct xa_node *node, unsigned int offset, - struct xa_node *child, unsigned int marks) + struct xa_node *child, unsigned int sibs, + unsigned int marks) { xa_mark_t mark = XA_MARK_0; @@ -978,7 +992,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset, if (marks & (1 << (__force unsigned int)mark)) { node_set_mark(node, offset, mark); if (child) - node_mark_all(child, mark); + node_mark_slots(child, sibs, mark); } if (mark == XA_MARK_MAX) break; @@ -1077,7 +1091,8 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) child->nr_values = xa_is_value(entry) ? XA_CHUNK_SIZE : 0; RCU_INIT_POINTER(child->parent, node); - node_set_marks(node, offset, child, marks); + node_set_marks(node, offset, child, xas->xa_sibs, + marks); rcu_assign_pointer(node->slots[offset], xa_mk_node(child)); if (xa_is_value(curr)) @@ -1086,7 +1101,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) } else { unsigned int canon = offset - xas->xa_sibs; - node_set_marks(node, canon, NULL, marks); + node_set_marks(node, canon, NULL, 0, marks); rcu_assign_pointer(node->slots[canon], entry); while (offset > canon) rcu_assign_pointer(node->slots[offset--], |