Diffstat (limited to 'lib')
33 files changed, 2048 insertions, 200 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index d4df5b24d75e..bd62be80228e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -17,6 +17,23 @@ config PRINTK_TIME The behavior is also controlled by the kernel command line parameter printk.time=1. See Documentation/admin-guide/kernel-parameters.rst +config PRINTK_CALLER + bool "Show caller information on printks" + depends on PRINTK + help + Selecting this option causes printk() to add a caller "thread id" (if + in task context) or a caller "processor id" (if not in task context) + to every message. + + This option is intended for environments where multiple threads + call printk() concurrently many times, since such output is difficult + to interpret without knowing where each line (or each fragment of a + line that was split up by a race) came from. + + Since toggling after boot would make the code racy, there is currently + no kernel command line parameter or sysfs interface to enable/disable + it. + config CONSOLE_LOGLEVEL_DEFAULT int "Default console loglevel (1-15)" range 1 15 @@ -222,7 +239,6 @@ config ENABLE_MUST_CHECK config FRAME_WARN int "Warn for stack frames larger than (needs gcc 4.4)" range 0 8192 - default 3072 if KASAN_EXTRA default 2048 if GCC_PLUGIN_LATENT_ENTROPY default 1280 if (!64BIT && PARISC) default 1024 if (!64BIT && !PARISC) @@ -266,23 +282,6 @@ config UNUSED_SYMBOLS you really need it, and what the merge plan to the mainline kernel for your module is. -config PAGE_OWNER - bool "Track page owner" - depends on DEBUG_KERNEL && STACKTRACE_SUPPORT - select DEBUG_FS - select STACKTRACE - select STACKDEPOT - select PAGE_EXTENSION - help - This keeps track of what call chain is the owner of a page, may - help to find bare alloc_page(s) leaks. Even if you include this - feature on your build, it is disabled in default. You should pass - "page_owner=on" to boot parameter in order to enable it. Eats - a fair amount of memory if enabled. See tools/vm/page_owner_sort.c - for user-space helper. - - If unsure, say N. - config DEBUG_FS bool "Debug Filesystem" help @@ -1655,42 +1654,6 @@ config PROVIDE_OHCI1394_DMA_INIT See Documentation/debugging-via-ohci1394.txt for more information. -config DMA_API_DEBUG - bool "Enable debugging of DMA-API usage" - select NEED_DMA_MAP_STATE - help - Enable this option to debug the use of the DMA API by device drivers. - With this option you will be able to detect common bugs in device - drivers like double-freeing of DMA mappings or freeing mappings that - were never allocated. - - This also attempts to catch cases where a page owned by DMA is - accessed by the cpu in a way that could cause data corruption. For - example, this enables cow_user_page() to check that the source page is - not undergoing DMA. - - This option causes a performance degradation. Use only if you want to - debug device drivers and dma interactions. - - If unsure, say N. - -config DMA_API_DEBUG_SG - bool "Debug DMA scatter-gather usage" - default y - depends on DMA_API_DEBUG - help - Perform extra checking that callers of dma_map_sg() have respected the - appropriate segment length/boundary limits for the given device when - preparing DMA scatterlists. - - This is particularly likely to have been overlooked in cases where the - dma_map_sg() API is used for general bulk mapping of pages rather than - preparing literal scatter-gather descriptors, where there is a risk of - unexpected behaviour from DMA API implementations if the scatterlist - is technically out-of-spec.
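The segment length/boundary limits referred to here are the ones a driver declares through the device's DMA parameters. A minimal sketch of declaring such limits follows; it is illustrative only and not part of this patch, with a hypothetical helper name and sizes:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical helper: declare the limits that DMA_API_DEBUG_SG verified. */
static int example_declare_dma_limits(struct device *dev)
{
	int ret;

	/* No scatterlist segment longer than 64 KiB for this device. */
	ret = dma_set_max_seg_size(dev, SZ_64K);
	if (ret)
		return ret;

	/* No segment may cross a 4 GiB boundary. */
	return dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
}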
- - If unsure, say N. - menuconfig RUNTIME_TESTING_MENU bool "Runtime Testing" def_bool y @@ -1700,7 +1663,6 @@ if RUNTIME_TESTING_MENU config LKDTM tristate "Linux Kernel Dump Test Tool Module" depends on DEBUG_FS - depends on BLOCK help This module enables testing of the different dumping mechanisms by inducing system failures at predefined crash points. @@ -1876,6 +1838,19 @@ config TEST_LKM If unsure, say N. +config TEST_VMALLOC + tristate "Test module for stress/performance analysis of vmalloc allocator" + default n + depends on MMU + depends on m + help + This builds the "test_vmalloc" module that should be used for + stress and performance analysis, so that any change to the vmalloc + subsystem can be evaluated from a performance and stability point + of view. + + If unsure, say N. + config TEST_USER_COPY tristate "Test user/kernel boundary protections" depends on m @@ -1991,6 +1966,28 @@ config TEST_MEMCAT_P If unsure, say N. +config TEST_LIVEPATCH + tristate "Test livepatching" + default n + depends on DYNAMIC_DEBUG + depends on LIVEPATCH + depends on m + help + Test kernel livepatching features for correctness. The tests will + load test modules that will be livepatched in various scenarios. + + To run all the livepatching tests: + + make -C tools/testing/selftests TARGETS=livepatch run_tests + + Alternatively, individual tests may be invoked: + + tools/testing/selftests/livepatch/test-callbacks.sh + tools/testing/selftests/livepatch/test-livepatch.sh + tools/testing/selftests/livepatch/test-shadow-vars.sh + + If unsure, say N. + config TEST_OBJAGG tristate "Perform selftest on object aggregation manager" default n @@ -1999,6 +1996,15 @@ config TEST_OBJAGG Enable this option to test object aggregation manager on boot (or module load). + +config TEST_STACKINIT + tristate "Test level of stack variable initialization" + help + Test if the kernel is zero-initializing stack variables and + padding. Coverage is controlled by compiler flags, + CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF, + or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL. + If unsure, say N. endif # RUNTIME_TESTING_MENU diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 9737059ec58b..9950b660e62d 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -78,16 +78,6 @@ config KASAN_SW_TAGS endchoice -config KASAN_EXTRA - bool "KASAN: extra checks" - depends on KASAN_GENERIC && DEBUG_KERNEL && !COMPILE_TEST - help - This enables further checks in generic KASAN, for now it only - includes the address-use-after-scope check that can lead to - excessive kernel stack usage, frame size warnings and longer - compile time. - See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 - choice prompt "Instrumentation type" depends on KASAN diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 98fa559ebd80..a2ae4a8e4fa6 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -27,15 +27,19 @@ config UBSAN_SANITIZE_ALL Enabling this option will get kernel image size increased significantly. -config UBSAN_ALIGNMENT - bool "Enable checking of pointers alignment" +config UBSAN_NO_ALIGNMENT + bool "Disable checking of pointers alignment" depends on UBSAN - default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS + default y if HAVE_EFFICIENT_UNALIGNED_ACCESS help - This option enables detection of unaligned memory accesses. - Enabling this option on architectures that support unaligned + This option disables the check of unaligned memory accesses. + It should be used when building allmodconfig.
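For illustration, this is the class of access the alignment check reports; a hedged sketch, not from this patch, with hypothetical function names:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Hypothetical example: load a u32 from an odd offset in a byte buffer. */
static u32 example_read_field(const u8 *buf)
{
	/*
	 * The alignment sanitizer trips here whenever buf + 1 is not
	 * 4-byte aligned, even on architectures whose CPUs handle such
	 * loads natively -- the false positives the help text warns about.
	 */
	return *(const u32 *)(buf + 1);
}

static u32 example_read_field_safe(const u8 *buf)
{
	/* The unaligned accessor form never trips the check. */
	return get_unaligned((const u32 *)(buf + 1));
}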
+ Disabling this option on architectures that support unaligned accesses may produce a lot of false positives. +config UBSAN_ALIGNMENT + def_bool !UBSAN_NO_ALIGNMENT + config TEST_UBSAN tristate "Module for testing for undefined behavior detection" depends on m && UBSAN diff --git a/lib/Makefile b/lib/Makefile index e1b59da71418..647517940b29 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -60,6 +60,7 @@ UBSAN_SANITIZE_test_ubsan.o := y obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o obj-$(CONFIG_TEST_LKM) += test_module.o +obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o obj-$(CONFIG_TEST_SORT) += test_sort.o @@ -76,6 +77,9 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o +obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o + +obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 59875eb278ea..edc3c14af41d 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -1117,6 +1117,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, index_key)) goto found_leaf; } + /* fall through */ case assoc_array_walk_tree_empty: case assoc_array_walk_found_wrong_shortcut: default: diff --git a/lib/cpumask.c b/lib/cpumask.c index 8d666ab84b5c..087a3e9a0202 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -5,6 +5,7 @@ #include <linux/cpumask.h> #include <linux/export.h> #include <linux/memblock.h> +#include <linux/numa.h> /** * cpumask_next - get the next cpu in a cpumask @@ -206,7 +207,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node) /* Wrap: we always want a cpu. 
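Taking i modulo the online count guarantees that every index lands on some online CPU. With the new <linux/numa.h> include, NUMA_NO_NODE now spells out the "no preferred node" case that callers previously signalled with a bare -1.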
*/ i %= num_online_cpus(); - if (node == -1) { + if (node == NUMA_NO_NODE) { for_each_cpu(cpu, cpu_online_mask) if (i-- == 0) return cpu; diff --git a/lib/devres.c b/lib/devres.c index faccf1a037d0..69bed2f38306 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -134,7 +134,6 @@ EXPORT_SYMBOL(devm_iounmap); void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) { resource_size_t size; - const char *name; void __iomem *dest_ptr; BUG_ON(!dev); @@ -145,9 +144,8 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) } size = resource_size(res); - name = res->name ?: dev_name(dev); - if (!devm_request_mem_region(dev, res->start, size, name)) { + if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) { dev_err(dev, "can't request region for resource %pR\n", res); return IOMEM_ERR_PTR(-EBUSY); } diff --git a/lib/div64.c b/lib/div64.c index 01c8602bb6ff..ee146bb4c558 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) quot = div_u64_rem(dividend, divisor, &rem32); *remainder = rem32; } else { - int n = 1 + fls(high); + int n = fls(high); quot = div_u64(dividend >> n, divisor >> n); if (quot != 0) @@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor) if (high == 0) { quot = div_u64(dividend, divisor); } else { - int n = 1 + fls(high); + int n = fls(high); quot = div_u64(dividend >> n, divisor >> n); if (quot != 0) diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index dbf2b457e47e..7bdf98c37e91 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -847,17 +847,19 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *name) { struct ddebug_table *dt; - const char *new_name; dt = kzalloc(sizeof(*dt), GFP_KERNEL); - if (dt == NULL) - return -ENOMEM; - new_name = kstrdup_const(name, GFP_KERNEL); - if (new_name == NULL) { - kfree(dt); + if (dt == NULL) { + pr_err("error adding module: %s\n", name); return -ENOMEM; } - dt->mod_name = new_name; + /* + * For built-in modules, name lives in .rodata and is + * immortal. For loaded modules, name points at the name[] + * member of struct module, which lives at least as long as + * this struct ddebug_table. 
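Since the pointer is stable for the whole lifetime of the table, ddebug_remove_module() below can identify the table by pointer equality rather than the old strcmp().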
+ */ + dt->mod_name = name; dt->num_ddebugs = n; dt->ddebugs = tab; @@ -868,7 +870,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, vpr_info("%u debug prints in module %s\n", n, dt->mod_name); return 0; } -EXPORT_SYMBOL_GPL(ddebug_add_module); /* helper for ddebug_dyndbg_(boot|module)_param_cb */ static int ddebug_dyndbg_param_cb(char *param, char *val, @@ -913,7 +914,6 @@ int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module) static void ddebug_table_free(struct ddebug_table *dt) { list_del_init(&dt->link); - kfree_const(dt->mod_name); kfree(dt); } @@ -930,15 +930,15 @@ int ddebug_remove_module(const char *mod_name) mutex_lock(&ddebug_lock); list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) { - if (!strcmp(dt->mod_name, mod_name)) { + if (dt->mod_name == mod_name) { ddebug_table_free(dt); ret = 0; + break; } } mutex_unlock(&ddebug_lock); return ret; } -EXPORT_SYMBOL_GPL(ddebug_remove_module); static void ddebug_remove_all_tables(void) { diff --git a/lib/iomap.c b/lib/iomap.c index 541d926da95e..e909ab71e995 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -65,8 +65,9 @@ static void bad_io_access(unsigned long port, const char *access) #endif #ifndef mmio_read16be -#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr)) -#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr)) +#define mmio_read16be(addr) swab16(readw(addr)) +#define mmio_read32be(addr) swab32(readl(addr)) +#define mmio_read64be(addr) swab64(readq(addr)) #endif unsigned int ioread8(void __iomem *addr) @@ -100,14 +101,89 @@ EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); +#ifdef readq +static u64 pio_read64_lo_hi(unsigned long port) +{ + u64 lo, hi; + + lo = inl(port); + hi = inl(port + sizeof(u32)); + + return lo | (hi << 32); +} + +static u64 pio_read64_hi_lo(unsigned long port) +{ + u64 lo, hi; + + hi = inl(port + sizeof(u32)); + lo = inl(port); + + return lo | (hi << 32); +} + +static u64 pio_read64be_lo_hi(unsigned long port) +{ + u64 lo, hi; + + lo = pio_read32be(port + sizeof(u32)); + hi = pio_read32be(port); + + return lo | (hi << 32); +} + +static u64 pio_read64be_hi_lo(unsigned long port) +{ + u64 lo, hi; + + hi = pio_read32be(port); + lo = pio_read32be(port + sizeof(u32)); + + return lo | (hi << 32); +} + +u64 ioread64_lo_hi(void __iomem *addr) +{ + IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64_hi_lo(void __iomem *addr) +{ + IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64be_lo_hi(void __iomem *addr) +{ + IO_COND(addr, return pio_read64be_lo_hi(port), + return mmio_read64be(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64be_hi_lo(void __iomem *addr) +{ + IO_COND(addr, return pio_read64be_hi_lo(port), + return mmio_read64be(addr)); + return 0xffffffffffffffffULL; +} + +EXPORT_SYMBOL(ioread64_lo_hi); +EXPORT_SYMBOL(ioread64_hi_lo); +EXPORT_SYMBOL(ioread64be_lo_hi); +EXPORT_SYMBOL(ioread64be_hi_lo); + +#endif /* readq */ + #ifndef pio_write16be #define pio_write16be(val,port) outw(swab16(val),port) #define pio_write32be(val,port) outl(swab32(val),port) #endif #ifndef mmio_write16be -#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port) -#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port) +#define mmio_write16be(val,port) writew(swab16(val),port) +#define mmio_write32be(val,port) writel(swab32(val),port) +#define mmio_write64be(val,port) 
writeq(swab64(val),port) #endif void iowrite8(u8 val, void __iomem *addr) @@ -136,6 +212,62 @@ EXPORT_SYMBOL(iowrite16be); EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); +#ifdef writeq +static void pio_write64_lo_hi(u64 val, unsigned long port) +{ + outl(val, port); + outl(val >> 32, port + sizeof(u32)); +} + +static void pio_write64_hi_lo(u64 val, unsigned long port) +{ + outl(val >> 32, port + sizeof(u32)); + outl(val, port); +} + +static void pio_write64be_lo_hi(u64 val, unsigned long port) +{ + pio_write32be(val, port + sizeof(u32)); + pio_write32be(val >> 32, port); +} + +static void pio_write64be_hi_lo(u64 val, unsigned long port) +{ + pio_write32be(val >> 32, port); + pio_write32be(val, port + sizeof(u32)); +} + +void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64_lo_hi(val, port), + writeq(val, addr)); +} + +void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64_hi_lo(val, port), + writeq(val, addr)); +} + +void iowrite64be_lo_hi(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64be_lo_hi(val, port), + mmio_write64be(val, addr)); +} + +void iowrite64be_hi_lo(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64be_hi_lo(val, port), + mmio_write64be(val, addr)); +} + +EXPORT_SYMBOL(iowrite64_lo_hi); +EXPORT_SYMBOL(iowrite64_hi_lo); +EXPORT_SYMBOL(iowrite64be_lo_hi); +EXPORT_SYMBOL(iowrite64be_hi_lo); + +#endif /* readq */ + /* * These are the "repeat MMIO read/write" functions. * Note the "__raw" accesses, since we don't want to diff --git a/lib/irq_poll.c b/lib/irq_poll.c index 86a709954f5a..2f17b488d58e 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -35,7 +35,7 @@ void irq_poll_sched(struct irq_poll *iop) local_irq_save(flags); list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(irq_poll_sched); diff --git a/lib/kobject.c b/lib/kobject.c index b72e00fd7d09..aa89edcd2b63 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -887,7 +887,7 @@ static void kset_release(struct kobject *kobj) kfree(kset); } -void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +static void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) { if (kobj->parent) kobject_get_ownership(kobj->parent, uid, gid); diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 27c6118afd1c..f05802687ba4 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -200,7 +200,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) r = kobject_action_type(buf, count, &action, &action_args); if (r) { - msg = "unknown uevent action string\n"; + msg = "unknown uevent action string"; goto out; } @@ -212,7 +212,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) r = kobject_action_args(action_args, count - (action_args - buf), &env); if (r == -EINVAL) { - msg = "incorrect uevent action arguments\n"; + msg = "incorrect uevent action arguments"; goto out; } @@ -224,7 +224,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) out: if (r) { devpath = kobject_get_path(kobj, GFP_KERNEL); - printk(KERN_WARNING "synth uevent: %s: %s", + pr_warn("synth uevent: %s: %s\n", devpath ?: "unknown device", msg ?: "failed to send uevent"); kfree(devpath); @@ -765,8 +765,7 @@ static int uevent_net_init(struct net *net) ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg); if (!ue_sk->sk) { - 
printk(KERN_ERR - "kobject_uevent: unable to create netlink socket!\n"); + pr_err("kobject_uevent: unable to create netlink socket!\n"); kfree(ue_sk); return -ENODEV; } diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile new file mode 100644 index 000000000000..26900ddaef82 --- /dev/null +++ b/lib/livepatch/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for livepatch test code. + +obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \ + test_klp_callbacks_demo.o \ + test_klp_callbacks_demo2.o \ + test_klp_callbacks_busy.o \ + test_klp_callbacks_mod.o \ + test_klp_livepatch.o \ + test_klp_shadow_vars.o + +# Target modules to be livepatched require CC_FLAGS_FTRACE +CFLAGS_test_klp_callbacks_busy.o += $(CC_FLAGS_FTRACE) +CFLAGS_test_klp_callbacks_mod.o += $(CC_FLAGS_FTRACE) diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c new file mode 100644 index 000000000000..5af7093ca00c --- /dev/null +++ b/lib/livepatch/test_klp_atomic_replace.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +static int replace; +module_param(replace, int, 0644); +MODULE_PARM_DESC(replace, "replace (default=0)"); + +#include <linux/seq_file.h> +static int livepatch_meminfo_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%s: %s\n", THIS_MODULE->name, + "this has been live patched"); + return 0; +} + +static struct klp_func funcs[] = { + { + .old_name = "meminfo_proc_show", + .new_func = livepatch_meminfo_proc_show, + }, {} +}; + +static struct klp_object objs[] = { + { + /* name being NULL means vmlinux */ + .funcs = funcs, + }, {} +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, + /* set .replace in the init function below for demo purposes */ +}; + +static int test_klp_atomic_replace_init(void) +{ + patch.replace = replace; + return klp_enable_patch(&patch); +} + +static void test_klp_atomic_replace_exit(void) +{ +} + +module_init(test_klp_atomic_replace_init); +module_exit(test_klp_atomic_replace_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: atomic replace"); diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c new file mode 100644 index 000000000000..40beddf8a0e2 --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_busy.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/workqueue.h> +#include <linux/delay.h> + +static int sleep_secs; +module_param(sleep_secs, int, 0644); +MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)"); + +static void busymod_work_func(struct work_struct *work); +static DECLARE_DELAYED_WORK(work, busymod_work_func); + +static void busymod_work_func(struct work_struct *work) +{ + pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs); + msleep(sleep_secs * 1000); + pr_info("%s exit\n", __func__); +} + +static int test_klp_callbacks_busy_init(void) +{ + pr_info("%s\n", __func__); + schedule_delayed_work(&work, + msecs_to_jiffies(1000 * 0)); + return 0; +} + +static void test_klp_callbacks_busy_exit(void) +{ + 
cancel_delayed_work_sync(&work); + pr_info("%s\n", __func__); +} + +module_init(test_klp_callbacks_busy_init); +module_exit(test_klp_callbacks_busy_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: busy target module"); diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c new file mode 100644 index 000000000000..3fd8fe1cd1cc --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_demo.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +static int pre_patch_ret; +module_param(pre_patch_ret, int, 0644); +MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)"); + +static const char *const module_state[] = { + [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", + [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", + [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", + [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", +}; + +static void callback_info(const char *callback, struct klp_object *obj) +{ + if (obj->mod) + pr_info("%s: %s -> %s\n", callback, obj->mod->name, + module_state[obj->mod->state]); + else + pr_info("%s: vmlinux\n", callback); +} + +/* Executed on object patching (ie, patch enablement) */ +static int pre_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); + return pre_patch_ret; +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void post_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void pre_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void post_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +static void patched_work_func(struct work_struct *work) +{ + pr_info("%s\n", __func__); +} + +static struct klp_func no_funcs[] = { + {} +}; + +static struct klp_func busymod_funcs[] = { + { + .old_name = "busymod_work_func", + .new_func = patched_work_func, + }, {} +}; + +static struct klp_object objs[] = { + { + .name = NULL, /* vmlinux */ + .funcs = no_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback, + }, + }, { + .name = "test_klp_callbacks_mod", + .funcs = no_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback, + }, + }, { + .name = "test_klp_callbacks_busy", + .funcs = busymod_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback, + }, + }, { } +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, +}; + +static int test_klp_callbacks_demo_init(void) +{ + return klp_enable_patch(&patch); +} + +static void test_klp_callbacks_demo_exit(void) +{ +} + +module_init(test_klp_callbacks_demo_init); +module_exit(test_klp_callbacks_demo_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y"); +MODULE_AUTHOR("Joe Lawrence 
<joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: livepatch demo"); diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c new file mode 100644 index 000000000000..5417573e80af --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_demo2.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +static int replace; +module_param(replace, int, 0644); +MODULE_PARM_DESC(replace, "replace (default=0)"); + +static const char *const module_state[] = { + [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", + [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", + [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", + [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", +}; + +static void callback_info(const char *callback, struct klp_object *obj) +{ + if (obj->mod) + pr_info("%s: %s -> %s\n", callback, obj->mod->name, + module_state[obj->mod->state]); + else + pr_info("%s: vmlinux\n", callback); +} + +/* Executed on object patching (ie, patch enablement) */ +static int pre_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); + return 0; +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void post_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void pre_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void post_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +static struct klp_func no_funcs[] = { + { } +}; + +static struct klp_object objs[] = { + { + .name = NULL, /* vmlinux */ + .funcs = no_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback, + }, + }, { } +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, + /* set .replace in the init function below for demo purposes */ +}; + +static int test_klp_callbacks_demo2_init(void) +{ + patch.replace = replace; + return klp_enable_patch(&patch); +} + +static void test_klp_callbacks_demo2_exit(void) +{ +} + +module_init(test_klp_callbacks_demo2_init); +module_exit(test_klp_callbacks_demo2_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: livepatch demo2"); diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c new file mode 100644 index 000000000000..8fbe645b1c2c --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_mod.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> + +static int test_klp_callbacks_mod_init(void) +{ + pr_info("%s\n", __func__); + return 0; +} + +static void test_klp_callbacks_mod_exit(void) +{ + pr_info("%s\n", __func__); +} + +module_init(test_klp_callbacks_mod_init); +module_exit(test_klp_callbacks_mod_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); 
+MODULE_DESCRIPTION("Livepatch test: target module"); diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c new file mode 100644 index 000000000000..aff08199de71 --- /dev/null +++ b/lib/livepatch/test_klp_livepatch.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +#include <linux/seq_file.h> +static int livepatch_cmdline_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%s: %s\n", THIS_MODULE->name, + "this has been live patched"); + return 0; +} + +static struct klp_func funcs[] = { + { + .old_name = "cmdline_proc_show", + .new_func = livepatch_cmdline_proc_show, + }, { } +}; + +static struct klp_object objs[] = { + { + /* name being NULL means vmlinux */ + .funcs = funcs, + }, { } +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, +}; + +static int test_klp_livepatch_init(void) +{ + return klp_enable_patch(&patch); +} + +static void test_klp_livepatch_exit(void) +{ +} + +module_init(test_klp_livepatch_init); +module_exit(test_klp_livepatch_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y"); +MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: livepatch module"); diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c new file mode 100644 index 000000000000..fe5c413efe96 --- /dev/null +++ b/lib/livepatch/test_klp_shadow_vars.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/livepatch.h> +#include <linux/slab.h> + +/* + * Keep a small list of pointers so that we can print address-agnostic + * pointer values. Use a rolling integer count to differentiate the values. + * Ironically we could have used the shadow variable API to do this, but + * let's not lean too heavily on the very code we're testing. + */ +static LIST_HEAD(ptr_list); +struct shadow_ptr { + void *ptr; + int id; + struct list_head list; +}; + +static void free_ptr_list(void) +{ + struct shadow_ptr *sp, *tmp_sp; + + list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) { + list_del(&sp->list); + kfree(sp); + } +} + +static int ptr_id(void *ptr) +{ + struct shadow_ptr *sp; + static int count; + + list_for_each_entry(sp, &ptr_list, list) { + if (sp->ptr == ptr) + return sp->id; + } + + sp = kmalloc(sizeof(*sp), GFP_ATOMIC); + if (!sp) + return -ENOMEM; + sp->ptr = ptr; + sp->id = count++; + + list_add(&sp->list, &ptr_list); + + return sp->id; +} + +/* + * Shadow variable wrapper functions that echo the function and arguments + * to the kernel log for testing verification. Don't display raw pointers, + * but use the ptr_id() value instead. 
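The pr_info() output these wrappers produce is what tools/testing/selftests/livepatch/test-shadow-vars.sh (listed in the TEST_LIVEPATCH help above) compares against its expected results.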
+ */ +static void *shadow_get(void *obj, unsigned long id) +{ + void *ret = klp_shadow_get(obj, id); + + pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n", + __func__, ptr_id(obj), id, ptr_id(ret)); + + return ret; +} + +static void *shadow_alloc(void *obj, unsigned long id, size_t size, + gfp_t gfp_flags, klp_shadow_ctor_t ctor, + void *ctor_data) +{ + void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, + ctor_data); + pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n", + __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), + ptr_id(ctor_data), ptr_id(ret)); + return ret; +} + +static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size, + gfp_t gfp_flags, klp_shadow_ctor_t ctor, + void *ctor_data) +{ + void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, + ctor_data); + pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n", + __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), + ptr_id(ctor_data), ptr_id(ret)); + return ret; +} + +static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) +{ + klp_shadow_free(obj, id, dtor); + pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n", + __func__, ptr_id(obj), id, ptr_id(dtor)); +} + +static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) +{ + klp_shadow_free_all(id, dtor); + pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", + __func__, id, ptr_id(dtor)); +} + + +/* Shadow variable constructor - remember simple pointer data */ +static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data) +{ + int **shadow_int = shadow_data; + *shadow_int = ctor_data; + pr_info("%s: PTR%d -> PTR%d\n", + __func__, ptr_id(shadow_int), ptr_id(ctor_data)); + + return 0; +} + +static void shadow_dtor(void *obj, void *shadow_data) +{ + pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n", + __func__, ptr_id(obj), ptr_id(shadow_data)); +} + +static int test_klp_shadow_vars_init(void) +{ + void *obj = THIS_MODULE; + int id = 0x1234; + size_t size = sizeof(int *); + gfp_t gfp_flags = GFP_KERNEL; + + int var1, var2, var3, var4; + int **sv1, **sv2, **sv3, **sv4; + + void *ret; + + ptr_id(NULL); + ptr_id(&var1); + ptr_id(&var2); + ptr_id(&var3); + ptr_id(&var4); + + /* + * With an empty shadow variable hash table, expect not to find + * any matches. + */ + ret = shadow_get(obj, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + /* + * Allocate a few shadow variables with different <obj> and <id>. + */ + sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1); + if (!sv1) + return -ENOMEM; + + sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2); + if (!sv2) + return -ENOMEM; + + sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3); + if (!sv3) + return -ENOMEM; + + /* + * Verify we can find our new shadow variables and that they point + * to expected data. 
+ */ + ret = shadow_get(obj, id); + if (!ret) + return -EINVAL; + if (ret == sv1 && *sv1 == &var1) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv1), ptr_id(*sv1)); + + ret = shadow_get(obj + 1, id); + if (!ret) + return -EINVAL; + if (ret == sv2 && *sv2 == &var2) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv2), ptr_id(*sv2)); + ret = shadow_get(obj, id + 1); + if (!ret) + return -EINVAL; + if (ret == sv3 && *sv3 == &var3) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv3), ptr_id(*sv3)); + + /* + * Allocate or get a few more, this time with the same <obj>, <id>. + * The second invocation should return the same shadow var. + */ + sv4 = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4); + if (!sv4) + return -ENOMEM; + + ret = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4); + if (!ret) + return -EINVAL; + if (ret == sv4 && *sv4 == &var4) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv4), ptr_id(*sv4)); + + /* + * Free the <obj=*, id> shadow variables and check that we can no + * longer find them. + */ + shadow_free(obj, id, shadow_dtor); /* sv1 */ + ret = shadow_get(obj, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + shadow_free(obj + 1, id, shadow_dtor); /* sv2 */ + ret = shadow_get(obj + 1, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + shadow_free(obj + 2, id, shadow_dtor); /* sv4 */ + ret = shadow_get(obj + 2, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + /* + * We should still find an <id+1> variable. + */ + ret = shadow_get(obj, id + 1); + if (!ret) + return -EINVAL; + if (ret == sv3 && *sv3 == &var3) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv3), ptr_id(*sv3)); + + /* + * Free all the <id+1> variables, too. + */ + shadow_free_all(id + 1, shadow_dtor); /* sv3 */ + ret = shadow_get(obj, id); + if (!ret) + pr_info(" shadow_get() got expected NULL result\n"); + + + free_ptr_list(); + + return 0; +} + +static void test_klp_shadow_vars_exit(void) +{ +} + +module_init(test_klp_shadow_vars_init); +module_exit(test_klp_shadow_vars_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: shadow variables"); diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 236eb21167b5..4525fb094844 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -20,7 +20,8 @@ static noinline size_t lzo1x_1_do_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, - size_t ti, void *wrkmem) + size_t ti, void *wrkmem, signed char *state_offset, + const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; @@ -35,27 +36,85 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, ip += ti < 4 ? 4 - ti : 0; for (;;) { - const unsigned char *m_pos; + const unsigned char *m_pos = NULL; size_t t, m_len, m_off; u32 dv; + u32 run_length = 0; literal: ip += 1 + ((ip - ii) >> 5); next: if (unlikely(ip >= ip_end)) break; dv = get_unaligned_le32(ip); - t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; - m_pos = in + dict[t]; - dict[t] = (lzo_dict_t) (ip - in); - if (unlikely(dv != get_unaligned_le32(m_pos))) - goto literal; + + if (dv == 0 && bitstream_version) { + const unsigned char *ir = ip + 4; + const unsigned char *limit = ip_end + < (ip + MAX_ZERO_RUN_LENGTH + 1) + ? 
ip_end : ip + MAX_ZERO_RUN_LENGTH + 1; +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \ + defined(LZO_FAST_64BIT_MEMORY_ACCESS) + u64 dv64; + + for (; (ir + 32) <= limit; ir += 32) { + dv64 = get_unaligned((u64 *)ir); + dv64 |= get_unaligned((u64 *)ir + 1); + dv64 |= get_unaligned((u64 *)ir + 2); + dv64 |= get_unaligned((u64 *)ir + 3); + if (dv64) + break; + } + for (; (ir + 8) <= limit; ir += 8) { + dv64 = get_unaligned((u64 *)ir); + if (dv64) { +# if defined(__LITTLE_ENDIAN) + ir += __builtin_ctzll(dv64) >> 3; +# elif defined(__BIG_ENDIAN) + ir += __builtin_clzll(dv64) >> 3; +# else +# error "missing endian definition" +# endif + break; + } + } +#else + while ((ir < (const unsigned char *) + ALIGN((uintptr_t)ir, 4)) && + (ir < limit) && (*ir == 0)) + ir++; + for (; (ir + 4) <= limit; ir += 4) { + dv = *((u32 *)ir); + if (dv) { +# if defined(__LITTLE_ENDIAN) + ir += __builtin_ctz(dv) >> 3; +# elif defined(__BIG_ENDIAN) + ir += __builtin_clz(dv) >> 3; +# else +# error "missing endian definition" +# endif + break; + } + } +#endif + while (likely(ir < limit) && unlikely(*ir == 0)) + ir++; + run_length = ir - ip; + if (run_length > MAX_ZERO_RUN_LENGTH) + run_length = MAX_ZERO_RUN_LENGTH; + } else { + t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; + m_pos = in + dict[t]; + dict[t] = (lzo_dict_t) (ip - in); + if (unlikely(dv != get_unaligned_le32(m_pos))) + goto literal; + } ii -= ti; ti = 0; t = ip - ii; if (t != 0) { if (t <= 3) { - op[-2] |= t; + op[*state_offset] |= t; COPY4(op, ii); op += t; } else if (t <= 16) { @@ -88,6 +147,17 @@ next: } } + if (unlikely(run_length)) { + ip += run_length; + run_length -= MIN_ZERO_RUN_LENGTH; + put_unaligned_le32((run_length << 21) | 0xfffc18 + | (run_length & 0x7), op); + op += 4; + run_length = 0; + *state_offset = -3; + goto finished_writing_instruction; + } + m_len = 4; { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) @@ -170,7 +240,6 @@ m_len_done: m_off = ip - m_pos; ip += m_len; - ii = ip; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); @@ -207,29 +276,45 @@ m_len_done: *op++ = (m_off << 2); *op++ = (m_off >> 6); } + *state_offset = -2; +finished_writing_instruction: + ii = ip; goto next; } *out_len = op - out; return in_end - (ii - ti); } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, +int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, - void *wrkmem) + void *wrkmem, const unsigned char bitstream_version) { const unsigned char *ip = in; unsigned char *op = out; size_t l = in_len; size_t t = 0; + signed char state_offset = -2; + unsigned int m4_max_offset; + + // LZO v0 will never write 17 as first byte, + // so this is used to version the bitstream + if (bitstream_version > 0) { + *op++ = 17; + *op++ = bitstream_version; + m4_max_offset = M4_MAX_OFFSET_V1; + } else { + m4_max_offset = M4_MAX_OFFSET_V0; + } while (l > 20) { - size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1); + size_t ll = l <= (m4_max_offset + 1) ? 
l : (m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); - t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); + t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, + &state_offset, bitstream_version); ip += ll; op += *out_len; l -= ll; @@ -242,7 +327,7 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, if (op == out && t <= 238) { *op++ = (17 + t); } else if (t <= 3) { - op[-2] |= t; + op[state_offset] |= t; } else if (t <= 18) { *op++ = (t - 3); } else { @@ -273,7 +358,24 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, *out_len = op - out; return LZO_E_OK; } + +int lzo1x_1_compress(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) +{ + return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); +} + +int lzorle1x_1_compress(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) +{ + return lzogeneric1x_1_compress(in, in_len, out, out_len, + wrkmem, LZO_VERSION); +} + EXPORT_SYMBOL_GPL(lzo1x_1_compress); +EXPORT_SYMBOL_GPL(lzorle1x_1_compress); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c index a1c387f6afba..6d2600ea3b55 100644 --- a/lib/lzo/lzo1x_decompress_safe.c +++ b/lib/lzo/lzo1x_decompress_safe.c @@ -46,11 +46,23 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, const unsigned char * const ip_end = in + in_len; unsigned char * const op_end = out + *out_len; + unsigned char bitstream_version; + op = out; ip = in; if (unlikely(in_len < 3)) goto input_overrun; + + if (likely(*ip == 17)) { + bitstream_version = ip[1]; + ip += 2; + if (unlikely(in_len < 5)) + goto input_overrun; + } else { + bitstream_version = 0; + } + if (*ip > 17) { t = *ip++ - 17; if (t < 4) { @@ -154,32 +166,49 @@ copy_literal_run: m_pos -= next >> 2; next &= 3; } else { - m_pos = op; - m_pos -= (t & 8) << 11; - t = (t & 7) + (3 - 1); - if (unlikely(t == 2)) { - size_t offset; - const unsigned char *ip_last = ip; + NEED_IP(2); + next = get_unaligned_le16(ip); + if (((next & 0xfffc) == 0xfffc) && + ((t & 0xf8) == 0x18) && + likely(bitstream_version)) { + NEED_IP(3); + t &= 7; + t |= ip[2] << 3; + t += MIN_ZERO_RUN_LENGTH; + NEED_OP(t); + memset(op, 0, t); + op += t; + next &= 3; + ip += 3; + goto match_next; + } else { + m_pos = op; + m_pos -= (t & 8) << 11; + t = (t & 7) + (3 - 1); + if (unlikely(t == 2)) { + size_t offset; + const unsigned char *ip_last = ip; - while (unlikely(*ip == 0)) { - ip++; - NEED_IP(1); - } - offset = ip - ip_last; - if (unlikely(offset > MAX_255_COUNT)) - return LZO_E_ERROR; + while (unlikely(*ip == 0)) { + ip++; + NEED_IP(1); + } + offset = ip - ip_last; + if (unlikely(offset > MAX_255_COUNT)) + return LZO_E_ERROR; - offset = (offset << 8) - offset; - t += offset + 7 + *ip++; - NEED_IP(2); + offset = (offset << 8) - offset; + t += offset + 7 + *ip++; + NEED_IP(2); + next = get_unaligned_le16(ip); + } + ip += 2; + m_pos -= next >> 2; + next &= 3; + if (m_pos == op) + goto eof_found; + m_pos -= 0x4000; } - next = get_unaligned_le16(ip); - ip += 2; - m_pos -= next >> 2; - next &= 3; - if (m_pos == op) - goto eof_found; - m_pos -= 0x4000; } TEST_LB(m_pos); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h index 
4edefd2f540c..b60851fcf6ce 100644 --- a/lib/lzo/lzodefs.h +++ b/lib/lzo/lzodefs.h @@ -13,9 +13,15 @@ */ +/* Version + * 0: original lzo version + * 1: lzo with support for RLE + */ +#define LZO_VERSION 1 + #define COPY4(dst, src) \ put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) -#if defined(__x86_64__) +#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64) #define COPY8(dst, src) \ put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst)) #else @@ -25,19 +31,21 @@ #if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) #error "conflicting endian definitions" -#elif defined(__x86_64__) +#elif defined(CONFIG_X86_64) || defined(CONFIG_ARM64) #define LZO_USE_CTZ64 1 #define LZO_USE_CTZ32 1 -#elif defined(__i386__) || defined(__powerpc__) +#define LZO_FAST_64BIT_MEMORY_ACCESS +#elif defined(CONFIG_X86) || defined(CONFIG_PPC) #define LZO_USE_CTZ32 1 -#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5) +#elif defined(CONFIG_ARM) && (__LINUX_ARM_ARCH__ >= 5) #define LZO_USE_CTZ32 1 #endif #define M1_MAX_OFFSET 0x0400 #define M2_MAX_OFFSET 0x0800 #define M3_MAX_OFFSET 0x4000 -#define M4_MAX_OFFSET 0xbfff +#define M4_MAX_OFFSET_V0 0xbfff +#define M4_MAX_OFFSET_V1 0xbffe #define M1_MIN_LEN 2 #define M1_MAX_LEN 2 @@ -53,6 +61,9 @@ #define M3_MARKER 32 #define M4_MARKER 16 +#define MIN_ZERO_RUN_LENGTH 4 +#define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH) + #define lzo_dict_t unsigned short #define D_BITS 13 #define D_SIZE (1u << D_BITS) diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc index d5242f544551..b7c68030da4f 100644 --- a/lib/raid6/neon.uc +++ b/lib/raid6/neon.uc @@ -28,7 +28,6 @@ typedef uint8x16_t unative_t; -#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) #define NSIZE sizeof(unative_t) /* @@ -61,7 +60,7 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs) int d, z, z0; register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; - const unative_t x1d = NBYTES(0x1d); + const unative_t x1d = vdupq_n_u8(0x1d); z0 = disks - 3; /* Highest data disk */ p = dptr[z0+1]; /* XOR parity */ @@ -92,7 +91,7 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop, int d, z, z0; register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; - const unative_t x1d = NBYTES(0x1d); + const unative_t x1d = vdupq_n_u8(0x1d); z0 = stop; /* P/Q right side optimization */ p = dptr[disks-2]; /* XOR parity */ diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c index 8cd20c9f834a..f13c07f82297 100644 --- a/lib/raid6/recov_neon_inner.c +++ b/lib/raid6/recov_neon_inner.c @@ -10,11 +10,6 @@ #include <arm_neon.h> -static const uint8x16_t x0f = { - 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, - 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, -}; - #ifdef CONFIG_ARM /* * AArch32 does not provide this intrinsic natively because it does not @@ -41,6 +36,7 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, uint8x16_t pm1 = vld1q_u8(pbmul + 16); uint8x16_t qm0 = vld1q_u8(qmul); uint8x16_t qm1 = vld1q_u8(qmul + 16); + uint8x16_t x0f = vdupq_n_u8(0x0f); /* * while ( bytes-- ) { @@ -60,14 +56,14 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, px = veorq_u8(vld1q_u8(p), vld1q_u8(dp)); vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq)); - vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4); + vy = vshrq_n_u8(vx, 4); vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f)); - vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f)); + vy = vqtbl1q_u8(qm1, vy); qx = veorq_u8(vx, vy); - vy = (uint8x16_t)vshrq_n_s16((int16x8_t)px, 4); + 
vy = vshrq_n_u8(px, 4); vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f)); - vy = vqtbl1q_u8(pm1, vandq_u8(vy, x0f)); + vy = vqtbl1q_u8(pm1, vy); vx = veorq_u8(vx, vy); db = veorq_u8(vx, qx); @@ -87,6 +83,7 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, { uint8x16_t qm0 = vld1q_u8(qmul); uint8x16_t qm1 = vld1q_u8(qmul + 16); + uint8x16_t x0f = vdupq_n_u8(0x0f); /* * while (bytes--) { @@ -100,9 +97,9 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq)); - vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4); + vy = vshrq_n_u8(vx, 4); vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f)); - vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f)); + vy = vqtbl1q_u8(qm1, vy); vx = veorq_u8(vx, vy); vy = veorq_u8(vx, vld1q_u8(p)); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 9ba349e775ef..739dc9fe2c55 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -625,6 +625,32 @@ bool __sg_page_iter_next(struct sg_page_iter *piter) } EXPORT_SYMBOL(__sg_page_iter_next); +static int sg_dma_page_count(struct scatterlist *sg) +{ + return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT; +} + +bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter) +{ + struct sg_page_iter *piter = &dma_iter->base; + + if (!piter->__nents || !piter->sg) + return false; + + piter->sg_pgoffset += piter->__pg_advance; + piter->__pg_advance = 1; + + while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) { + piter->sg_pgoffset -= sg_dma_page_count(piter->sg); + piter->sg = sg_next(piter->sg); + if (!--piter->__nents || !piter->sg) + return false; + } + + return true; +} +EXPORT_SYMBOL(__sg_page_iter_dma_next); + /** * sg_miter_start - start mapping iteration over a sg list * @miter: sg mapping iter to be started diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 7cab9a9869ac..7222093ee00b 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -631,11 +631,6 @@ static ssize_t trigger_batched_requests_store(struct device *dev, for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (!req) { - WARN_ON(1); - rc = -ENOMEM; - goto out_bail; - } req->fw = NULL; req->idx = i; req->name = test_fw_config->name; @@ -737,10 +732,6 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (!req) { - WARN_ON(1); - goto out_bail; - } req->name = test_fw_config->name; req->fw = NULL; req->idx = i; diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 51b78405bf24..7de2702621dc 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -480,29 +480,6 @@ static noinline void __init copy_user_test(void) kfree(kmem); } -static noinline void __init use_after_scope_test(void) -{ - volatile char *volatile p; - - pr_info("use-after-scope on int\n"); - { - int local = 0; - - p = (char *)&local; - } - p[0] = 1; - p[3] = 1; - - pr_info("use-after-scope on array\n"); - { - char local[1024] = {0}; - - p = local; - } - p[0] = 1; - p[1023] = 1; -} - static noinline void __init kasan_alloca_oob_left(void) { volatile int i = 10; @@ -682,7 +659,6 @@ static int __init kmalloc_tests_init(void) kasan_alloca_oob_right(); ksize_unpoisons_memory(); copy_user_test(); - use_after_scope_test(); kmem_cache_double_free(); kmem_cache_invalid_free(); kasan_memchr(); diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c new file mode 100644 index 000000000000..13115b6f2b88 --- /dev/null +++ b/lib/test_stackinit.c @@ -0,0 +1,378 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Test cases for compiler-based stack variable zeroing via future + * compiler flags or CONFIG_GCC_PLUGIN_STRUCTLEAK*. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> + +/* Exfiltration buffer. */ +#define MAX_VAR_SIZE 128 +static char check_buf[MAX_VAR_SIZE]; + +/* Character array to trigger stack protector in all functions. */ +#define VAR_BUFFER 32 + +/* Volatile mask to convince compiler to copy memory with 0xff. */ +static volatile u8 forced_mask = 0xff; + +/* Location and size tracking to validate fill and test are colocated. */ +static void *fill_start, *target_start; +static size_t fill_size, target_size; + +static bool range_contains(char *haystack_start, size_t haystack_size, + char *needle_start, size_t needle_size) +{ + if (needle_start >= haystack_start && + needle_start + needle_size <= haystack_start + haystack_size) + return true; + return false; +} + +#define DO_NOTHING_TYPE_SCALAR(var_type) var_type +#define DO_NOTHING_TYPE_STRING(var_type) void +#define DO_NOTHING_TYPE_STRUCT(var_type) void + +#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr) +#define DO_NOTHING_RETURN_STRING(ptr) /**/ +#define DO_NOTHING_RETURN_STRUCT(ptr) /**/ + +#define DO_NOTHING_CALL_SCALAR(var, name) \ + (var) = do_nothing_ ## name(&(var)) +#define DO_NOTHING_CALL_STRING(var, name) \ + do_nothing_ ## name(var) +#define DO_NOTHING_CALL_STRUCT(var, name) \ + do_nothing_ ## name(&(var)) + +#define FETCH_ARG_SCALAR(var) &var +#define FETCH_ARG_STRING(var) var +#define FETCH_ARG_STRUCT(var) &var + +#define FILL_SIZE_STRING 16 + +#define INIT_CLONE_SCALAR /**/ +#define INIT_CLONE_STRING [FILL_SIZE_STRING] +#define INIT_CLONE_STRUCT /**/ + +#define INIT_SCALAR_none /**/ +#define INIT_SCALAR_zero = 0 + +#define INIT_STRING_none [FILL_SIZE_STRING] /**/ +#define INIT_STRING_zero [FILL_SIZE_STRING] = { } + +#define INIT_STRUCT_none /**/ +#define INIT_STRUCT_zero = { } +#define INIT_STRUCT_static_partial = { .two = 0, } +#define INIT_STRUCT_static_all = { .one = arg->one, \ + .two = arg->two, \ + .three = arg->three, \ + .four = arg->four, \ + } +#define INIT_STRUCT_dynamic_partial = { .two = arg->two, } +#define INIT_STRUCT_dynamic_all = { .one = arg->one, \ + .two = arg->two, \ + .three = arg->three, \ + .four = arg->four, \ + } +#define INIT_STRUCT_runtime_partial ; \ + var.two = 0 +#define INIT_STRUCT_runtime_all ; \ + var.one = 0; \ + var.two = 0; \ + var.three = 0; \ + memset(&var.four, 0, \ + sizeof(var.four)) + +/* + * @name: unique string name for the test + * @var_type: type to be tested for zeroing initialization + * @which: is this a SCALAR, STRING, or STRUCT type? + * @init_level: what kind of initialization is performed + */ +#define DEFINE_TEST_DRIVER(name, var_type, which) \ +/* Returns 0 on success, 1 on failure. */ \ +static noinline __init int test_ ## name (void) \ +{ \ + var_type zero INIT_CLONE_ ## which; \ + int ignored; \ + u8 sum = 0, i; \ + \ + /* Notice when a new test is larger than expected. */ \ + BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \ + \ + /* Fill clone type with zero for per-field init. */ \ + memset(&zero, 0x00, sizeof(zero)); \ + /* Fill stack with 0xFF. */ \ + ignored = leaf_ ##name((unsigned long)&ignored, 1, \ + FETCH_ARG_ ## which(zero)); \ + /* Clear entire check buffer for later bit tests. */ \ + memset(check_buf, 0x00, sizeof(check_buf)); \ + /* Extract stack-defined variable contents.
*/ \ + ignored = leaf_ ##name((unsigned long)&ignored, 0, \ + FETCH_ARG_ ## which(zero)); \ + \ + /* Validate that compiler lined up fill and target. */ \ + if (!range_contains(fill_start, fill_size, \ + target_start, target_size)) { \ + pr_err(#name ": stack fill missed target!?\n"); \ + pr_err(#name ": fill %zu wide\n", fill_size); \ + pr_err(#name ": target offset by %d\n", \ + (int)((ssize_t)(uintptr_t)fill_start - \ + (ssize_t)(uintptr_t)target_start)); \ + return 1; \ + } \ + \ + /* Look for any set bits in the check region. */ \ + for (i = 0; i < sizeof(check_buf); i++) \ + sum += (check_buf[i] != 0); \ + \ + if (sum == 0) \ + pr_info(#name " ok\n"); \ + else \ + pr_warn(#name " FAIL (uninit bytes: %d)\n", \ + sum); \ + \ + return (sum != 0); \ +} +#define DEFINE_TEST(name, var_type, which, init_level) \ +/* no-op to force compiler into ignoring "uninitialized" vars */\ +static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \ +do_nothing_ ## name(var_type *ptr) \ +{ \ + /* Will always be true, but compiler doesn't know. */ \ + if ((unsigned long)ptr > 0x2) \ + return DO_NOTHING_RETURN_ ## which(ptr); \ + else \ + return DO_NOTHING_RETURN_ ## which(ptr + 1); \ +} \ +static noinline __init int leaf_ ## name(unsigned long sp, \ + bool fill, \ + var_type *arg) \ +{ \ + char buf[VAR_BUFFER]; \ + var_type var INIT_ ## which ## _ ## init_level; \ + \ + target_start = &var; \ + target_size = sizeof(var); \ + /* \ + * Keep this buffer around to make sure we've got a \ + * stack frame of SOME kind... \ + */ \ + memset(buf, (char)(sp && 0xff), sizeof(buf)); \ + /* Fill variable with 0xFF. */ \ + if (fill) { \ + fill_start = &var; \ + fill_size = sizeof(var); \ + memset(fill_start, \ + (char)((sp && 0xff) | forced_mask), \ + fill_size); \ + } \ + \ + /* Silence "never initialized" warnings. */ \ + DO_NOTHING_CALL_ ## which(var, name); \ + \ + /* Exfiltrate "var". */ \ + memcpy(check_buf, target_start, target_size); \ + \ + return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \ +} \ +DEFINE_TEST_DRIVER(name, var_type, which) + +/* Structure with no padding. */ +struct test_packed { + unsigned long one; + unsigned long two; + unsigned long three; + unsigned long four; +}; + +/* Simple structure with padding likely to be covered by compiler. */ +struct test_small_hole { + size_t one; + char two; + /* 3 byte padding hole here. */ + int three; + unsigned long four; +}; + +/* Try to trigger unhandled padding in a structure. */ +struct test_aligned { + u32 internal1; + u64 internal2; +} __aligned(64); + +struct test_big_hole { + u8 one; + u8 two; + u8 three; + /* 61 byte padding hole here. */ + struct test_aligned four; +} __aligned(64); + +struct test_trailing_hole { + char *one; + char *two; + char *three; + char four; + /* "sizeof(unsigned long) - 1" byte padding hole here. */ +}; + +/* Test if STRUCTLEAK is clearing structs with __user fields. 
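The base STRUCTLEAK plugin only instruments structures containing a __user annotated pointer, so of all the cases above this is the only one that plain CONFIG_GCC_PLUGIN_STRUCTLEAK is expected to zero.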
+
+#define DEFINE_SCALAR_TEST(name, init) \
+	DEFINE_TEST(name ## _ ## init, name, SCALAR, init)
+
+#define DEFINE_SCALAR_TESTS(init) \
+	DEFINE_SCALAR_TEST(u8, init); \
+	DEFINE_SCALAR_TEST(u16, init); \
+	DEFINE_SCALAR_TEST(u32, init); \
+	DEFINE_SCALAR_TEST(u64, init); \
+	DEFINE_TEST(char_array_ ## init, unsigned char, STRING, init)
+
+#define DEFINE_STRUCT_TEST(name, init) \
+	DEFINE_TEST(name ## _ ## init, \
+		    struct test_ ## name, STRUCT, init)
+
+#define DEFINE_STRUCT_TESTS(init) \
+	DEFINE_STRUCT_TEST(small_hole, init); \
+	DEFINE_STRUCT_TEST(big_hole, init); \
+	DEFINE_STRUCT_TEST(trailing_hole, init); \
+	DEFINE_STRUCT_TEST(packed, init)
+
+/* These should be fully initialized all the time! */
+DEFINE_SCALAR_TESTS(zero);
+DEFINE_STRUCT_TESTS(zero);
+/* Static initialization: padding may be left uninitialized. */
+DEFINE_STRUCT_TESTS(static_partial);
+DEFINE_STRUCT_TESTS(static_all);
+/* Dynamic initialization: padding may be left uninitialized. */
+DEFINE_STRUCT_TESTS(dynamic_partial);
+DEFINE_STRUCT_TESTS(dynamic_all);
+/* Runtime initialization: padding may be left uninitialized. */
+DEFINE_STRUCT_TESTS(runtime_partial);
+DEFINE_STRUCT_TESTS(runtime_all);
+/* No initialization without compiler instrumentation. */
+DEFINE_SCALAR_TESTS(none);
+DEFINE_STRUCT_TESTS(none);
+DEFINE_TEST(user, struct test_user, STRUCT, none);
+
+/*
+ * Check two uses of a variable declared outside either switch path,
+ * which was noticed as a special case when porting earlier stack-init
+ * compiler logic.
+ */
+static int noinline __leaf_switch_none(int path, bool fill)
+{
+	switch (path) {
+		uint64_t var;
+
+	case 1:
+		target_start = &var;
+		target_size = sizeof(var);
+		if (fill) {
+			fill_start = &var;
+			fill_size = sizeof(var);
+
+			memset(fill_start, forced_mask | 0x55, fill_size);
+		}
+		memcpy(check_buf, target_start, target_size);
+		break;
+	case 2:
+		target_start = &var;
+		target_size = sizeof(var);
+		if (fill) {
+			fill_start = &var;
+			fill_size = sizeof(var);
+
+			memset(fill_start, forced_mask | 0xaa, fill_size);
+		}
+		memcpy(check_buf, target_start, target_size);
+		break;
+	default:
+		var = 5;
+		return var & forced_mask;
+	}
+	return 0;
+}
+
+static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill,
+					      uint64_t *arg)
+{
+	return __leaf_switch_none(1, fill);
+}
+
+static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
+					      uint64_t *arg)
+{
+	return __leaf_switch_none(2, fill);
+}
+
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR);
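[Editor's note] The switch tests above exercise a subtle C scoping rule: a variable declared inside a switch body before the first case label is in scope for every arm, but control always jumps directly to a case label, so an initializer on such a declaration would never execute. An illustrative userspace-style sketch of the hazard being probed (function name hypothetical):

#include <stdint.h>

int read_switch_scoped(int path)
{
	switch (path) {
		uint64_t var;	/* in scope below; never initialized here */

	case 1:
		return (int)var;	/* reads whatever was on the stack */
	default:
		return 0;
	}
}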
+
+static int __init test_stackinit_init(void)
+{
+	unsigned int failures = 0;
+
+#define test_scalars(init)	do { \
+		failures += test_u8_ ## init (); \
+		failures += test_u16_ ## init (); \
+		failures += test_u32_ ## init (); \
+		failures += test_u64_ ## init (); \
+		failures += test_char_array_ ## init (); \
+	} while (0)
+
+#define test_structs(init)	do { \
+		failures += test_small_hole_ ## init (); \
+		failures += test_big_hole_ ## init (); \
+		failures += test_trailing_hole_ ## init (); \
+		failures += test_packed_ ## init (); \
+	} while (0)
+
+	/* These are explicitly initialized and should always pass. */
+	test_scalars(zero);
+	test_structs(zero);
+	/* Padding here appears to be accidentally always initialized? */
+	test_structs(dynamic_partial);
+	/* Padding initialization depends on compiler behaviors. */
+	test_structs(static_partial);
+	test_structs(static_all);
+	test_structs(dynamic_all);
+	test_structs(runtime_partial);
+	test_structs(runtime_all);
+
+	/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
+	test_scalars(none);
+	failures += test_switch_1_none();
+	failures += test_switch_2_none();
+
+	/* STRUCTLEAK_BYREF should cover from here down. */
+	test_structs(none);
+
+	/* STRUCTLEAK will only cover this. */
+	failures += test_user();
+
+	if (failures == 0)
+		pr_info("all tests passed!\n");
+	else
+		pr_err("failures: %u\n", failures);
+
+	return failures ? -EINVAL : 0;
+}
+module_init(test_stackinit_init);
+
+static void __exit test_stackinit_exit(void)
+{ }
+module_exit(test_stackinit_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 280f4979d00e..9ea10adf7a66 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -42,14 +42,6 @@ static void test_ubsan_divrem_overflow(void)
 	val /= val2;
 }
 
-static void test_ubsan_vla_bound_not_positive(void)
-{
-	volatile int size = -1;
-	char buf[size];
-
-	(void)buf;
-}
-
 static void test_ubsan_shift_out_of_bounds(void)
 {
 	volatile int val = -1;
@@ -61,7 +53,7 @@ static void test_ubsan_shift_out_of_bounds(void)
 static void test_ubsan_out_of_bounds(void)
 {
 	volatile int i = 4, j = 5;
-	volatile int arr[i];
+	volatile int arr[4];
 
 	arr[j] = i;
 }
@@ -113,7 +105,6 @@ static const test_ubsan_fp test_ubsan_array[] = {
 	test_ubsan_mul_overflow,
 	test_ubsan_negate_overflow,
 	test_ubsan_divrem_overflow,
-	test_ubsan_vla_bound_not_positive,
 	test_ubsan_shift_out_of_bounds,
 	test_ubsan_out_of_bounds,
 	test_ubsan_load_invalid_value,
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
new file mode 100644
index 000000000000..83cdcaa82bf6
--- /dev/null
+++ b/lib/test_vmalloc.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test module for stressing and analyzing the performance of the
+ * vmalloc allocator.
+ * (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/kthread.h>
+#include <linux/moduleparam.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/rwsem.h>
+#include <linux/mm.h>
+
+#define __param(type, name, init, msg) \
+	static type name = init; \
+	module_param(name, type, 0444); \
+	MODULE_PARM_DESC(name, msg)
+
+__param(bool, single_cpu_test, false,
+	"Use single first online CPU to run tests");
+
+__param(bool, sequential_test_order, false,
+	"Use sequential stress tests order");
+
+__param(int, test_repeat_count, 1,
+	"Set test repeat counter");
+
+__param(int, test_loop_count, 1000000,
+	"Set test loop counter");
+
+__param(int, run_test_mask, INT_MAX,
+	"Set tests specified in the mask.\n\n"
+		"\t\tid: 1, name: fix_size_alloc_test\n"
+		"\t\tid: 2, name: full_fit_alloc_test\n"
+		"\t\tid: 4, name: long_busy_list_alloc_test\n"
+		"\t\tid: 8, name: random_size_alloc_test\n"
+		"\t\tid: 16, name: fix_align_alloc_test\n"
+		"\t\tid: 32, name: random_size_align_alloc_test\n"
+		"\t\tid: 64, name: align_shift_alloc_test\n"
+		"\t\tid: 128, name: pcpu_alloc_test\n"
+		/* Add a new test case description here. */
+);
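[Editor's note] Each __param() use above is shorthand for the usual module-parameter triple. For example, the test_repeat_count line expands (modulo whitespace) to:

static int test_repeat_count = 1;
module_param(test_repeat_count, int, 0444);
MODULE_PARM_DESC(test_repeat_count, "Set test repeat counter");

With the 0444 permission the value can be set at load time, e.g. "modprobe test_vmalloc test_repeat_count=10", and read back via /sys/module/test_vmalloc/parameters/test_repeat_count.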
+
+/*
+ * Depends on the single_cpu_test parameter: if it is true, the tests
+ * run only on the first online CPU; otherwise they run on all online
+ * CPUs.
+ */
+static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
+
+/*
+ * Read-write semaphore used to synchronize the setup phase, which is
+ * done in the main thread, with the workers.
+ */
+static DECLARE_RWSEM(prepare_for_test_rwsem);
+
+/*
+ * Completion tracking for worker threads.
+ */
+static DECLARE_COMPLETION(test_all_done_comp);
+static atomic_t test_n_undone = ATOMIC_INIT(0);
+
+static inline void
+test_report_one_done(void)
+{
+	if (atomic_dec_and_test(&test_n_undone))
+		complete(&test_all_done_comp);
+}
+
+static int random_size_align_alloc_test(void)
+{
+	unsigned long size, align, rnd;
+	void *ptr;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		get_random_bytes(&rnd, sizeof(rnd));
+
+		/*
+		 * Maximum 1024 pages, if PAGE_SIZE is 4096.
+		 */
+		align = 1 << (rnd % 23);
+
+		/*
+		 * Maximum 10 pages.
+		 */
+		size = ((rnd % 10) + 1) * PAGE_SIZE;
+
+		ptr = __vmalloc_node_range(size, align,
+					   VMALLOC_START, VMALLOC_END,
+					   GFP_KERNEL | __GFP_ZERO,
+					   PAGE_KERNEL,
+					   0, 0, __builtin_return_address(0));
+
+		if (!ptr)
+			return -1;
+
+		vfree(ptr);
+	}
+
+	return 0;
+}
+
+/*
+ * This test case is expected to fail.
+ */
+static int align_shift_alloc_test(void)
+{
+	unsigned long align;
+	void *ptr;
+	int i;
+
+	for (i = 0; i < BITS_PER_LONG; i++) {
+		align = ((unsigned long) 1) << i;
+
+		ptr = __vmalloc_node_range(PAGE_SIZE, align,
+					   VMALLOC_START, VMALLOC_END,
+					   GFP_KERNEL | __GFP_ZERO,
+					   PAGE_KERNEL,
+					   0, 0, __builtin_return_address(0));
+
+		if (!ptr)
+			return -1;
+
+		vfree(ptr);
+	}
+
+	return 0;
+}
+
+static int fix_align_alloc_test(void)
+{
+	void *ptr;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		ptr = __vmalloc_node_range(5 * PAGE_SIZE,
+					   THREAD_ALIGN << 1,
+					   VMALLOC_START, VMALLOC_END,
+					   GFP_KERNEL | __GFP_ZERO,
+					   PAGE_KERNEL,
+					   0, 0, __builtin_return_address(0));
+
+		if (!ptr)
+			return -1;
+
+		vfree(ptr);
+	}
+
+	return 0;
+}
+
+static int random_size_alloc_test(void)
+{
+	unsigned int n;
+	void *p;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		get_random_bytes(&n, sizeof(n));
+		n = (n % 100) + 1;
+
+		p = vmalloc(n * PAGE_SIZE);
+
+		if (!p)
+			return -1;
+
+		*((__u8 *)p) = 1;
+		vfree(p);
+	}
+
+	return 0;
+}
+
+static int long_busy_list_alloc_test(void)
+{
+	void *ptr_1, *ptr_2;
+	void **ptr;
+	int rv = -1;
+	int i;
+
+	ptr = vmalloc(sizeof(void *) * 15000);
+	if (!ptr)
+		return rv;
+
+	for (i = 0; i < 15000; i++)
+		ptr[i] = vmalloc(1 * PAGE_SIZE);
+
+	for (i = 0; i < test_loop_count; i++) {
+		ptr_1 = vmalloc(100 * PAGE_SIZE);
+		if (!ptr_1)
+			goto leave;
+
+		ptr_2 = vmalloc(1 * PAGE_SIZE);
+		if (!ptr_2) {
+			vfree(ptr_1);
+			goto leave;
+		}
+
+		*((__u8 *)ptr_1) = 0;
+		*((__u8 *)ptr_2) = 1;
+
+		vfree(ptr_1);
+		vfree(ptr_2);
+	}
+
+	/* Success */
+	rv = 0;
+
+leave:
+	for (i = 0; i < 15000; i++)
+		vfree(ptr[i]);
+
+	vfree(ptr);
+	return rv;
+}
+
+static int full_fit_alloc_test(void)
+{
+	void **ptr, **junk_ptr, *tmp;
+	int junk_length;
+	int rv = -1;
+	int i;
+
+	junk_length = fls(num_online_cpus());
+	junk_length *= (32 * 1024 * 1024 / PAGE_SIZE);
+
+	ptr = vmalloc(sizeof(void *) * junk_length);
+	if (!ptr)
+		return rv;
+
+	junk_ptr = vmalloc(sizeof(void *) * junk_length);
+	if (!junk_ptr) {
+		vfree(ptr);
+		return rv;
+	}
+
+	for (i = 0; i < junk_length; i++) {
+		ptr[i] = vmalloc(1 * PAGE_SIZE);
+		junk_ptr[i] = vmalloc(1 * PAGE_SIZE);
+	}
+
+	for (i = 0; i < junk_length; i++)
+		vfree(junk_ptr[i]);
+
+	for (i = 0; i < test_loop_count; i++) {
+		tmp = vmalloc(1 * PAGE_SIZE);
+
+		if (!tmp)
+			goto error;
+
+		*((__u8 *)tmp) = 1;
+		vfree(tmp);
+	}
+
+	/* Success */
+	rv = 0;
+
+error:
+	for (i = 0; i < junk_length; i++)
+		vfree(ptr[i]);
+
+	vfree(ptr);
+	vfree(junk_ptr);
+
+	return rv;
+}
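[Editor's note] To put numbers on the sizing in full_fit_alloc_test(): with 4 KiB pages, 32 * 1024 * 1024 / PAGE_SIZE is 8192, so junk_length is fls(num_online_cpus()) * 8192 single-page allocations. On a 4-CPU machine, fls(4) == 3, giving 24576 entries, i.e. roughly 96 MiB for ptr[] plus the same again for junk_ptr[]. Freeing only junk_ptr[] then leaves the vmalloc address space riddled with single-page holes that the subsequent allocation loop has to fit into, which is the point of the test.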
+
+static int fix_size_alloc_test(void)
+{
+	void *ptr;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		ptr = vmalloc(3 * PAGE_SIZE);
+
+		if (!ptr)
+			return -1;
+
+		*((__u8 *)ptr) = 0;
+
+		vfree(ptr);
+	}
+
+	return 0;
+}
+
+static int
+pcpu_alloc_test(void)
+{
+	int rv = 0;
+#ifndef CONFIG_NEED_PER_CPU_KM
+	void __percpu **pcpu;
+	size_t size, align;
+	int i;
+
+	pcpu = vmalloc(sizeof(void __percpu *) * 35000);
+	if (!pcpu)
+		return -1;
+
+	for (i = 0; i < 35000; i++) {
+		unsigned int r;
+
+		get_random_bytes(&r, sizeof(r));
+		size = (r % (PAGE_SIZE / 4)) + 1;
+
+		/*
+		 * Maximum PAGE_SIZE
+		 */
+		get_random_bytes(&r, sizeof(r));
+		align = 1 << ((r % 11) + 1);
+
+		pcpu[i] = __alloc_percpu(size, align);
+		if (!pcpu[i])
+			rv = -1;
+	}
+
+	for (i = 0; i < 35000; i++)
+		free_percpu(pcpu[i]);
+
+	vfree(pcpu);
+#endif
+	return rv;
+}
+
+struct test_case_desc {
+	const char *test_name;
+	int (*test_func)(void);
+};
+
+static struct test_case_desc test_case_array[] = {
+	{ "fix_size_alloc_test", fix_size_alloc_test },
+	{ "full_fit_alloc_test", full_fit_alloc_test },
+	{ "long_busy_list_alloc_test", long_busy_list_alloc_test },
+	{ "random_size_alloc_test", random_size_alloc_test },
+	{ "fix_align_alloc_test", fix_align_alloc_test },
+	{ "random_size_align_alloc_test", random_size_align_alloc_test },
+	{ "align_shift_alloc_test", align_shift_alloc_test },
+	{ "pcpu_alloc_test", pcpu_alloc_test },
+	/* Add a new test case here. */
+};
+
+struct test_case_data {
+	int test_failed;
+	int test_passed;
+	u64 time;
+};
+
+/* Split it to get rid of: WARNING: line over 80 characters */
+static struct test_case_data
+	per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
+
+static struct test_driver {
+	struct task_struct *task;
+	unsigned long start;
+	unsigned long stop;
+	int cpu;
+} per_cpu_test_driver[NR_CPUS];
+
+static void shuffle_array(int *arr, int n)
+{
+	unsigned int rnd;
+	int i, j, x;
+
+	for (i = n - 1; i > 0; i--) {
+		get_random_bytes(&rnd, sizeof(rnd));
+
+		/* Pick an index in [0, i). */
+		j = rnd % i;
+
+		/* Swap indexes. */
+		x = arr[i];
+		arr[i] = arr[j];
+		arr[j] = x;
+	}
+}
+
+static int test_func(void *private)
+{
+	struct test_driver *t = private;
+	cpumask_t newmask = CPU_MASK_NONE;
+	int random_array[ARRAY_SIZE(test_case_array)];
+	int index, i, j, ret;
+	ktime_t kt;
+	u64 delta;
+
+	cpumask_set_cpu(t->cpu, &newmask);
+	set_cpus_allowed_ptr(current, &newmask);
+
+	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
+		random_array[i] = i;
+
+	if (!sequential_test_order)
+		shuffle_array(random_array, ARRAY_SIZE(test_case_array));
+
+	/*
+	 * Block until initialization is done.
+	 */
+	down_read(&prepare_for_test_rwsem);
+
+	t->start = get_cycles();
+	for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+		index = random_array[i];
+
+		/*
+		 * Skip tests that are not set in run_test_mask.
+		 */
+		if (!(run_test_mask & (1 << index)))
+			continue;
+
+		kt = ktime_get();
+		for (j = 0; j < test_repeat_count; j++) {
+			ret = test_case_array[index].test_func();
+			if (!ret)
+				per_cpu_test_data[t->cpu][index].test_passed++;
+			else
+				per_cpu_test_data[t->cpu][index].test_failed++;
+		}
+
+		/*
+		 * Compute the average time the test took.
+		 */
+		delta = (u64) ktime_us_delta(ktime_get(), kt);
+		do_div(delta, (u32) test_repeat_count);
+
+		per_cpu_test_data[t->cpu][index].time = delta;
+	}
+	t->stop = get_cycles();
+
+	up_read(&prepare_for_test_rwsem);
+	test_report_one_done();
+
+	/*
+	 * Wait for the kthread_stop() call.
+	 */
+	while (!kthread_should_stop())
+		msleep(10);
+
+	return 0;
+}
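[Editor's note] A worked example of the mask check above, using the ids from the run_test_mask parameter description: loading the module with run_test_mask=9 selects bits 0 and 3 (1 | 8), so only fix_size_alloc_test (id 1, array index 0) and random_size_alloc_test (id 8, array index 3) run; for every other index, run_test_mask & (1 << index) is zero and the loop continues to the next entry.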
+
+static void
+init_test_configuration(void)
+{
+	/*
+	 * Reset all data of all CPUs.
+	 */
+	memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+
+	if (single_cpu_test)
+		cpumask_set_cpu(cpumask_first(cpu_online_mask),
+				&cpus_run_test_mask);
+	else
+		cpumask_copy(&cpus_run_test_mask, cpu_online_mask);
+
+	if (test_repeat_count <= 0)
+		test_repeat_count = 1;
+
+	if (test_loop_count <= 0)
+		test_loop_count = 1;
+}
+
+static void do_concurrent_test(void)
+{
+	int cpu, ret;
+
+	/*
+	 * Set some basic configuration plus a sanity check.
+	 */
+	init_test_configuration();
+
+	/*
+	 * Put all workers on hold until setup is complete.
+	 */
+	down_write(&prepare_for_test_rwsem);
+
+	for_each_cpu(cpu, &cpus_run_test_mask) {
+		struct test_driver *t = &per_cpu_test_driver[cpu];
+
+		t->cpu = cpu;
+		t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
+
+		if (!IS_ERR(t->task))
+			/* Success. */
+			atomic_inc(&test_n_undone);
+		else
+			pr_err("Failed to start kthread for CPU %d\n", cpu);
+	}
+
+	/*
+	 * Now let the workers do their job.
+	 */
+	up_write(&prepare_for_test_rwsem);
+
+	/*
+	 * Sleep quietly until all workers are done, polling at a 1 second
+	 * interval. Since the tests can take a long time, a plain
+	 * wait_for_completion() could trip the hung-task watchdog; that is
+	 * why we use wait_for_completion_timeout() with an HZ timeout.
+	 */
+	do {
+		ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
+	} while (!ret);
+
+	for_each_cpu(cpu, &cpus_run_test_mask) {
+		struct test_driver *t = &per_cpu_test_driver[cpu];
+		int i;
+
+		if (!IS_ERR(t->task))
+			kthread_stop(t->task);
+
+		for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+			if (!(run_test_mask & (1 << i)))
+				continue;
+
+			pr_info(
+				"Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
+				test_case_array[i].test_name,
+				per_cpu_test_data[cpu][i].test_passed,
+				per_cpu_test_data[cpu][i].test_failed,
+				test_repeat_count, test_loop_count,
+				per_cpu_test_data[cpu][i].time);
+		}
+
+		pr_info("All tests took CPU%d=%lu cycles\n",
+			cpu, t->stop - t->start);
+	}
+}
+
+static int vmalloc_test_init(void)
+{
+	do_concurrent_test();
+	return -EAGAIN; /* Failing here directly unloads the module. */
+}
+
+static void vmalloc_test_exit(void)
+{
+}
+
+module_init(vmalloc_test_init)
+module_exit(vmalloc_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Uladzislau Rezki");
+MODULE_DESCRIPTION("vmalloc test module");
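[Editor's note] The prepare_for_test_rwsem choreography in do_concurrent_test() is a small "gate" idiom worth naming: the coordinator holds the write side while it resets state and spawns workers, each worker's first act is to take the read side, so no worker runs until up_write() opens the gate, and all readers then proceed concurrently. A condensed sketch of the idiom (illustrative only; the gate/worker/coordinator names are hypothetical):

#include <linux/rwsem.h>
#include <linux/kthread.h>

static DECLARE_RWSEM(gate);

static int worker(void *arg)
{
	down_read(&gate);	/* blocks until the coordinator's up_write() */
	/* ... do the actual work ... */
	up_read(&gate);
	return 0;
}

static void coordinator(void)
{
	down_write(&gate);			/* close the gate */
	/* ... reset shared state ... */
	kthread_run(worker, NULL, "worker/0");	/* workers queue on the gate */
	up_write(&gate);			/* open it: all start together */
}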
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3add92329bae..791b6fa36905 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
  */
 
 #include <stdarg.h>
+#include <linux/build_bug.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>	/* for KSYM_SYMBOL_LEN */
@@ -405,6 +406,8 @@ struct printf_spec {
 	unsigned int	base:8;		/* number base, 8, 10 or 16 only */
 	signed int	precision:16;	/* # of digits/chars */
 } __packed;
+static_assert(sizeof(struct printf_spec) == 8);
+
 #define FIELD_WIDTH_MAX ((1 << 23) - 1)
 #define PRECISION_MAX ((1 << 15) - 1)
 
@@ -422,8 +425,6 @@ char *number(char *buf, char *end, unsigned long long num,
 	int field_width = spec.field_width;
 	int precision = spec.precision;
 
-	BUILD_BUG_ON(sizeof(struct printf_spec) != 8);
-
 	/* locase = 0 or 0x20. ORing digits or letters with 'locase'
 	 * produces same digits or (maybe lowercased) letters */
 	locase = (spec.flags & SMALL);
@@ -1930,7 +1931,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
  *            (legacy clock framework) of the clock
  * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
  *        (legacy clock framework) of the clock
- * - 'Cr' For a clock, it prints the current rate of the clock
  * - 'G' For flags to be printed as a collection of symbolic strings that would
  *       construct the specific value. Supported flags given by option:
  *        p page flags (see struct page) given as pointer to unsigned long