Diffstat (limited to 'lib')
-rw-r--r--   lib/842/842_decompress.c            |  14
-rw-r--r--   lib/Kconfig.debug                   |  62
-rw-r--r--   lib/Makefile                        |   1
-rw-r--r--   lib/mpi/mpicoder.c                  |  21
-rw-r--r--   lib/netdev-notifier-error-inject.c  |  55
-rw-r--r--   lib/rhashtable.c                    |   3
-rw-r--r--   lib/seq_buf.c                       |   6
-rw-r--r--   lib/test_bpf.c                      | 120
-rw-r--r--   lib/test_rhashtable.c               |  76
9 files changed, 318 insertions, 40 deletions
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
index 8881dad2a6a0..a7f278d2ed8f 100644
--- a/lib/842/842_decompress.c
+++ b/lib/842/842_decompress.c
@@ -69,7 +69,7 @@ struct sw842_param {
 	((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) :	\
 	 (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) :	\
 	 (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) :	\
-	 WARN(1, "pr_debug param err invalid size %x\n", s))
+	 0)
 
 static int next_bits(struct sw842_param *p, u64 *d, u8 n);
 
@@ -202,10 +202,14 @@ static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize)
 		return -EINVAL;
 	}
 
-	pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
-		 size, (unsigned long)index, (unsigned long)(index * size),
-		 (unsigned long)offset, (unsigned long)total,
-		 (unsigned long)beN_to_cpu(&p->ostart[offset], size));
+	if (size != 2 && size != 4 && size != 8)
+		WARN(1, "__do_index invalid size %x\n", size);
+	else
+		pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
+			 size, (unsigned long)index,
+			 (unsigned long)(index * size), (unsigned long)offset,
+			 (unsigned long)total,
+			 (unsigned long)beN_to_cpu(&p->ostart[offset], size));
 
 	memcpy(p->out, &p->ostart[offset], size);
 	p->out += size;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c98e93c0a084..ee1ac1cc082c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1495,6 +1495,29 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
 
 	  If unsure, say N.
 
+config NETDEV_NOTIFIER_ERROR_INJECT
+	tristate "Netdev notifier error injection module"
+	depends on NET && NOTIFIER_ERROR_INJECTION
+	help
+	  This option provides the ability to inject artificial errors to
+	  netdevice notifier chain callbacks. It is controlled through debugfs
+	  interface /sys/kernel/debug/notifier-error-inject/netdev
+
+	  If the notifier call chain should be failed with some events
+	  notified, write the error code to "actions/<notifier event>/error".
+
+	  Example: Inject netdevice mtu change error (-22 = -EINVAL)
+
+	  # cd /sys/kernel/debug/notifier-error-inject/netdev
+	  # echo -22 > actions/NETDEV_CHANGEMTU/error
+	  # ip link set eth0 mtu 1024
+	  RTNETLINK answers: Invalid argument
+
+	  To compile this code as a module, choose M here: the module will
+	  be called netdev-notifier-error-inject.
+
+	  If unsure, say N.
+
 config FAULT_INJECTION
 	bool "Fault-injection framework"
 	depends on DEBUG_KERNEL
@@ -1863,3 +1886,42 @@ source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
 
+config ARCH_HAS_DEVMEM_IS_ALLOWED
+	bool
+
+config STRICT_DEVMEM
+	bool "Filter access to /dev/mem"
+	depends on MMU
+	depends on ARCH_HAS_DEVMEM_IS_ALLOWED
+	default y if TILE || PPC
+	---help---
+	  If this option is disabled, you allow userspace (root) access to all
+	  of memory, including kernel and userspace memory. Accidental
+	  access to this is obviously disastrous, but specific access can
+	  be used by people debugging the kernel. Note that with PAT support
+	  enabled, even in this case there are restrictions on /dev/mem
+	  use due to the cache aliasing requirements.
+
+	  If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
+	  file only allows userspace access to PCI space and the BIOS code and
+	  data regions. This is sufficient for dosemu and X and all common
+	  users of /dev/mem.
+
+	  If in doubt, say Y.
+
+config IO_STRICT_DEVMEM
+	bool "Filter I/O access to /dev/mem"
+	depends on STRICT_DEVMEM
+	default STRICT_DEVMEM
+	---help---
+	  If this option is disabled, you allow userspace (root) access to all
+	  io-memory regardless of whether a driver is actively using that
+	  range. Accidental access to this is obviously disastrous, but
+	  specific access can be used by people debugging kernel drivers.
+
+	  If this option is switched on, the /dev/mem file only allows
+	  userspace access to *idle* io-memory ranges (see /proc/iomem) This
+	  may break traditional users of /dev/mem (dosemu, legacy X, etc...)
+	  if the driver using a given range cannot be disabled.
+
+	  If in doubt, say Y.
diff --git a/lib/Makefile b/lib/Makefile
index 7f1de26613d2..180dd4d0dd41 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -120,6 +120,7 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
 obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
+obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
 obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
 obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
 	of-reconfig-notifier-error-inject.o
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 3db76b8c1115..ec533a6c77b5 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -135,7 +135,9 @@ EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
  * @buf:	bufer to which the output will be written to. Needs to be at
  *		leaset mpi_get_size(a) long.
  * @buf_len:	size of the buf.
- * @nbytes:	receives the actual length of the data written.
+ * @nbytes:	receives the actual length of the data written on success and
+ *		the data to-be-written on -EOVERFLOW in case buf_len was too
+ *		small.
  * @sign:	if not NULL, it will be set to the sign of a.
  *
  * Return:	0 on success or error code in case of error
@@ -148,7 +150,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
 	unsigned int n = mpi_get_size(a);
 	int i, lzeros = 0;
 
-	if (buf_len < n || !buf || !nbytes)
+	if (!buf || !nbytes)
 		return -EINVAL;
 
 	if (sign)
@@ -163,6 +165,11 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
 			break;
 	}
 
+	if (buf_len < n - lzeros) {
+		*nbytes = n - lzeros;
+		return -EOVERFLOW;
+	}
+
 	p = buf;
 	*nbytes = n - lzeros;
 
@@ -332,7 +339,8 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer);
  * @nbytes:	in/out param - it has the be set to the maximum number of
  *		bytes that can be written to sgl. This has to be at least
  *		the size of the integer a. On return it receives the actual
- *		length of the data written.
+ *		length of the data written on success or the data that would
+ *		be written if buffer was too small.
  * @sign:	if not NULL, it will be set to the sign of a.
  *
  * Return:	0 on success or error code in case of error
@@ -345,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
 	unsigned int n = mpi_get_size(a);
 	int i, x, y = 0, lzeros = 0, buf_len;
 
-	if (!nbytes || *nbytes < n)
+	if (!nbytes)
 		return -EINVAL;
 
 	if (sign)
@@ -360,6 +368,11 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
 			break;
 	}
 
+	if (*nbytes < n - lzeros) {
+		*nbytes = n - lzeros;
+		return -EOVERFLOW;
+	}
+
 	*nbytes = n - lzeros;
 	buf_len = sgl->length;
 	p2 = sg_virt(sgl);
diff --git a/lib/netdev-notifier-error-inject.c b/lib/netdev-notifier-error-inject.c
new file mode 100644
index 000000000000..13e9c62e216f
--- /dev/null
+++ b/lib/netdev-notifier-error-inject.c
@@ -0,0 +1,55 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify netdevice notifier priority");
+
+static struct notifier_err_inject netdev_notifier_err_inject = {
+	.actions = {
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_REGISTER) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEMTU) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGENAME) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_UP) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_TYPE_CHANGE) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) },
+		{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) },
+		{}
+	}
+};
+
+static struct dentry *dir;
+
+static int netdev_err_inject_init(void)
+{
+	int err;
+
+	dir = notifier_err_inject_init("netdev", notifier_err_inject_dir,
+				       &netdev_notifier_err_inject, priority);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	err = register_netdevice_notifier(&netdev_notifier_err_inject.nb);
+	if (err)
+		debugfs_remove_recursive(dir);
+
+	return err;
+}
+
+static void netdev_err_inject_exit(void)
+{
+	unregister_netdevice_notifier(&netdev_notifier_err_inject.nb);
+	debugfs_remove_recursive(dir);
+}
+
+module_init(netdev_err_inject_init);
+module_exit(netdev_err_inject_exit);
+
+MODULE_DESCRIPTION("Netdevice notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 51282f579760..cc808707d1cf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,9 +231,6 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
 	 */
 	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
 
-	/* Ensure the new table is visible to readers. */
-	smp_wmb();
-
 	spin_unlock_bh(old_tbl->locks);
 
 	return 0;
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 5c94e1012a91..cb18469e1f49 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -306,10 +306,12 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
 	if (!cnt)
 		return 0;
 
-	if (s->len <= s->readpos)
+	len = seq_buf_used(s);
+
+	if (len <= s->readpos)
 		return -EBUSY;
 
-	len = seq_buf_used(s) - s->readpos;
+	len -= s->readpos;
 	if (cnt > len)
 		cnt = len;
 	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 10cd1860e5b0..27a7a26b1ece 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1685,6 +1685,126 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 0x35d97ef2 } }
 	},
+	{	/* Mainly checking JIT here. */
+		"MOV REG64",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_ALU64_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_IMM(BPF_MOV, R2, 0),
+			BPF_ALU64_IMM(BPF_MOV, R3, 0),
+			BPF_ALU64_IMM(BPF_MOV, R4, 0),
+			BPF_ALU64_IMM(BPF_MOV, R5, 0),
+			BPF_ALU64_IMM(BPF_MOV, R6, 0),
+			BPF_ALU64_IMM(BPF_MOV, R7, 0),
+			BPF_ALU64_IMM(BPF_MOV, R8, 0),
+			BPF_ALU64_IMM(BPF_MOV, R9, 0),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
+	{	/* Mainly checking JIT here. */
+		"MOV REG32",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU32_IMM(BPF_MOV, R2, 0),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0),
+			BPF_ALU32_IMM(BPF_MOV, R4, 0),
+			BPF_ALU32_IMM(BPF_MOV, R5, 0),
+			BPF_ALU32_IMM(BPF_MOV, R6, 0),
+			BPF_ALU32_IMM(BPF_MOV, R7, 0),
+			BPF_ALU32_IMM(BPF_MOV, R8, 0),
+			BPF_ALU32_IMM(BPF_MOV, R9, 0),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
+	{	/* Mainly checking JIT here. */
+		"LD IMM64",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_LD_IMM64(R0, 0x0LL),
+			BPF_LD_IMM64(R1, 0x0LL),
+			BPF_LD_IMM64(R2, 0x0LL),
+			BPF_LD_IMM64(R3, 0x0LL),
+			BPF_LD_IMM64(R4, 0x0LL),
+			BPF_LD_IMM64(R5, 0x0LL),
+			BPF_LD_IMM64(R6, 0x0LL),
+			BPF_LD_IMM64(R7, 0x0LL),
+			BPF_LD_IMM64(R8, 0x0LL),
+			BPF_LD_IMM64(R9, 0x0LL),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
 	{
 		"INT: ALU MIX",
 		.u.insns_int = {
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8c1ad1ced72c..270bf7289b1e 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -36,9 +36,9 @@ static int runs = 4;
 module_param(runs, int, 0);
 MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
 
-static int max_size = 65536;
+static int max_size = 0;
 module_param(max_size, int, 0);
-MODULE_PARM_DESC(runs, "Maximum table size (default: 65536)");
+MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)");
 
 static bool shrinking = false;
 module_param(shrinking, bool, 0);
@@ -52,6 +52,10 @@ static int tcount = 10;
 module_param(tcount, int, 0);
 MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
 
+static bool enomem_retry = false;
+module_param(enomem_retry, bool, 0);
+MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
+
 struct test_obj {
 	int			value;
 	struct rhash_head	node;
@@ -76,6 +80,28 @@ static struct rhashtable_params test_rht_params = {
 static struct semaphore prestart_sem;
 static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
 
+static int insert_retry(struct rhashtable *ht, struct rhash_head *obj,
+			const struct rhashtable_params params)
+{
+	int err, retries = -1, enomem_retries = 0;
+
+	do {
+		retries++;
+		cond_resched();
+		err = rhashtable_insert_fast(ht, obj, params);
+		if (err == -ENOMEM && enomem_retry) {
+			enomem_retries++;
+			err = -EBUSY;
+		}
+	} while (err == -EBUSY);
+
+	if (enomem_retries)
+		pr_info(" %u insertions retried after -ENOMEM\n",
+			enomem_retries);
+
+	return err ? : retries;
+}
+
 static int __init test_rht_lookup(struct rhashtable *ht)
 {
 	unsigned int i;
@@ -157,7 +183,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 {
 	struct test_obj *obj;
 	int err;
-	unsigned int i, insert_fails = 0;
+	unsigned int i, insert_retries = 0;
 	s64 start, end;
 
 	/*
@@ -170,22 +196,16 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 		struct test_obj *obj = &array[i];
 
 		obj->value = i * 2;
-
-		err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
-		if (err == -ENOMEM || err == -EBUSY) {
-			/* Mark failed inserts but continue */
-			obj->value = TEST_INSERT_FAIL;
-			insert_fails++;
-		} else if (err) {
+		err = insert_retry(ht, &obj->node, test_rht_params);
+		if (err > 0)
+			insert_retries += err;
+		else if (err)
 			return err;
-		}
-
-		cond_resched();
 	}
 
-	if (insert_fails)
-		pr_info(" %u insertions failed due to memory pressure\n",
-			insert_fails);
+	if (insert_retries)
+		pr_info(" %u insertions retried due to memory pressure\n",
+			insert_retries);
 
 	test_bucket_stats(ht);
 	rcu_read_lock();
@@ -236,13 +256,15 @@ static int thread_lookup_test(struct thread_data *tdata)
 				  obj->value, key);
 			err++;
 		}
+
+		cond_resched();
 	}
 	return err;
 }
 
 static int threadfunc(void *data)
 {
-	int i, step, err = 0, insert_fails = 0;
+	int i, step, err = 0, insert_retries = 0;
 	struct thread_data *tdata = data;
 
 	up(&prestart_sem);
@@ -251,20 +273,18 @@ static int threadfunc(void *data)
 
 	for (i = 0; i < entries; i++) {
 		tdata->objs[i].value = (tdata->id << 16) | i;
-		err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
-					     test_rht_params);
-		if (err == -ENOMEM || err == -EBUSY) {
-			tdata->objs[i].value = TEST_INSERT_FAIL;
-			insert_fails++;
+		err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
+		if (err > 0) {
+			insert_retries += err;
 		} else if (err) {
 			pr_err(" thread[%d]: rhashtable_insert_fast failed\n",
 			       tdata->id);
 			goto out;
 		}
 	}
-	if (insert_fails)
-		pr_info(" thread[%d]: %d insert failures\n",
-			tdata->id, insert_fails);
+	if (insert_retries)
+		pr_info(" thread[%d]: %u insertions retried due to memory pressure\n",
+			tdata->id, insert_retries);
 
 	err = thread_lookup_test(tdata);
 	if (err) {
@@ -285,6 +305,8 @@ static int threadfunc(void *data)
 			goto out;
 		}
 		tdata->objs[i].value = TEST_INSERT_FAIL;
+
+		cond_resched();
 	}
 	err = thread_lookup_test(tdata);
 	if (err) {
@@ -311,7 +333,7 @@ static int __init test_rht_init(void)
 	entries = min(entries, MAX_ENTRIES);
 
 	test_rht_params.automatic_shrinking = shrinking;
-	test_rht_params.max_size = max_size;
+	test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
 	test_rht_params.nelem_hint = size;
 
 	pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
@@ -357,6 +379,8 @@ static int __init test_rht_init(void)
 		return -ENOMEM;
 	}
 
+	test_rht_params.max_size = max_size ? :
+				   roundup_pow_of_two(tcount * entries);
 	err = rhashtable_init(&ht, &test_rht_params);
 	if (err < 0) {
 		pr_warn("Test failed: Unable to initialize hashtable: %d\n",