Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      13
-rw-r--r--  lib/Kconfig.debug                38
-rw-r--r--  lib/Makefile                      5
-rw-r--r--  lib/asn1_decoder.c               27
-rw-r--r--  lib/atomic64.c                    3
-rw-r--r--  lib/atomic64_test.c              68
-rw-r--r--  lib/bitmap.c                     43
-rw-r--r--  lib/decompress_bunzip2.c          6
-rw-r--r--  lib/decompress_inflate.c         31
-rw-r--r--  lib/decompress_unlz4.c            6
-rw-r--r--  lib/decompress_unlzma.c           9
-rw-r--r--  lib/decompress_unlzo.c           13
-rw-r--r--  lib/decompress_unxz.c            12
-rw-r--r--  lib/devres.c                     13
-rw-r--r--  lib/genalloc.c                  110
-rw-r--r--  lib/iommu-common.c                6
-rw-r--r--  lib/klist.c                      41
-rw-r--r--  lib/kstrtox.c                     2
-rw-r--r--  lib/lockref.c                     8
-rw-r--r--  lib/mpi/mpicoder.c               38
-rw-r--r--  lib/nmi_backtrace.c             162
-rw-r--r--  lib/pci_iomap.c                  73
-rw-r--r--  lib/raid6/neon.c                 13
-rw-r--r--  lib/raid6/neon.uc                46
-rw-r--r--  lib/rhashtable.c                  5
-rw-r--r--  lib/scatterlist.c                 4
-rw-r--r--  lib/sg_split.c                  202
-rw-r--r--  lib/show_mem.c                    4
-rw-r--r--  lib/string_helpers.c             26
-rw-r--r--  lib/test-kstrtox.c                6
-rw-r--r--  lib/test_kasan.c                  6
-rw-r--r--  lib/test_static_key_base.c       68
-rw-r--r--  lib/test_static_keys.c          225
-rw-r--r--  lib/vsprintf.c                    1
-rw-r--r--  lib/zlib_deflate/deftree.c        6
-rw-r--r--  lib/zlib_deflate/defutil.h       16
36 files changed, 1152 insertions, 203 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 278890dd1049..2e491ac15622 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -53,9 +53,6 @@ config GENERIC_IO
config STMP_DEVICE
bool
-config PERCPU_RWSEM
- bool
-
config ARCH_USE_CMPXCHG_LOCKREF
bool
@@ -511,6 +508,13 @@ config UCS2_STRING
source "lib/fonts/Kconfig"
+config SG_SPLIT
+ def_bool n
+ help
+ Provides a helper to split scatterlists into chunks, each chunk being
+ a scatterlist. This should be selected by a driver or an API which
+ wishes to split a scatterlist among multiple DMA channels.
+
#
# sg chaining option
#
@@ -521,4 +525,7 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config ARCH_HAS_MMIO_FLUSH
+ bool
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e2894b23efb6..ab76b99adc85 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -916,12 +916,6 @@ config DEBUG_RT_MUTEXES
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
-config RT_MUTEX_TESTER
- bool "Built-in scriptable tester for rt-mutexes"
- depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
- help
- This option enables a rt-mutex tester.
-
config DEBUG_SPINLOCK
bool "Spinlock and rw-lock debugging: basic checks"
depends on DEBUG_KERNEL
@@ -1353,20 +1347,6 @@ config RCU_CPU_STALL_TIMEOUT
RCU grace period persists, additional CPU stall warnings are
printed at more widely spaced intervals.
-config RCU_CPU_STALL_INFO
- bool "Print additional diagnostics on RCU CPU stall"
- depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
- default y
- help
- For each stalled CPU that is aware of the current RCU grace
- period, print out additional per-CPU diagnostic information
- regarding scheduling-clock ticks, idle state, and,
- for RCU_FAST_NO_HZ kernels, idle-entry state.
-
- Say N if you are unsure.
-
- Say Y if you want to enable such diagnostics.
-
config RCU_TRACE
bool "Enable tracing for RCU"
depends on DEBUG_KERNEL
@@ -1379,7 +1359,7 @@ config RCU_TRACE
Say N if you are unsure.
config RCU_EQS_DEBUG
- bool "Use this when adding any sort of NO_HZ support to your arch"
+ bool "Provide debugging asserts for adding NO_HZ support to an arch"
depends on DEBUG_KERNEL
help
This option provides consistency checks in RCU's handling of
@@ -1542,6 +1522,13 @@ config FAIL_MMC_REQUEST
and to test how the mmc host driver handles retries from
the block device.
+config FAIL_FUTEX
+ bool "Fault-injection capability for futexes"
+ select DEBUG_FS
+ depends on FAULT_INJECTION && FUTEX
+ help
+ Provide fault-injection capability for futexes.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1840,6 +1827,15 @@ config MEMTEST
memtest=17, mean do 17 test patterns.
If you are unsure how to answer this question, answer N.
+config TEST_STATIC_KEYS
+ tristate "Test static keys"
+ default n
+ depends on m
+ help
+ Test the static key interfaces.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 0d4b30fb60e6..13a7c6ae3fec 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o md5.o irq_regs.o argv_split.o \
proportions.o flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o
+ earlycpio.o seq_buf.o nmi_backtrace.o
obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -39,6 +39,8 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -158,6 +160,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1a000bb050f9..2b3f46c049d4 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -24,15 +24,20 @@ static const unsigned char asn1_op_lengths[ASN1_OP__NR] = {
[ASN1_OP_MATCH_JUMP] = 1 + 1 + 1,
[ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_MATCH_ANY] = 1,
+ [ASN1_OP_MATCH_ANY_OR_SKIP] = 1,
[ASN1_OP_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_ANY] = 1,
+ [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1,
[ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_FAIL] = 1,
[ASN1_OP_COMPLETE] = 1,
[ASN1_OP_ACT] = 1 + 1,
+ [ASN1_OP_MAYBE_ACT] = 1 + 1,
[ASN1_OP_RETURN] = 1,
[ASN1_OP_END_SEQ] = 1,
[ASN1_OP_END_SEQ_OF] = 1 + 1,
@@ -177,6 +182,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
unsigned char flags = 0;
#define FLAG_INDEFINITE_LENGTH 0x01
#define FLAG_MATCHED 0x02
+#define FLAG_LAST_MATCHED 0x04 /* Last tag matched */
#define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag
* - ie. whether or not we are going to parse
* a compound type.
@@ -208,9 +214,9 @@ next_op:
unsigned char tmp;
/* Skip conditional matches if possible */
- if ((op & ASN1_OP_MATCH__COND &&
- flags & FLAG_MATCHED) ||
- dp == datalen) {
+ if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) ||
+ (op & ASN1_OP_MATCH__SKIP && dp == datalen)) {
+ flags &= ~FLAG_LAST_MATCHED;
pc += asn1_op_lengths[op];
goto next_op;
}
@@ -302,7 +308,9 @@ next_op:
/* Decide how to handle the operation */
switch (op) {
case ASN1_OP_MATCH_ANY_ACT:
+ case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
case ASN1_OP_COND_MATCH_ANY_ACT:
+ case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
if (ret < 0)
return ret;
@@ -319,8 +327,10 @@ next_op:
case ASN1_OP_MATCH:
case ASN1_OP_MATCH_OR_SKIP:
case ASN1_OP_MATCH_ANY:
+ case ASN1_OP_MATCH_ANY_OR_SKIP:
case ASN1_OP_COND_MATCH_OR_SKIP:
case ASN1_OP_COND_MATCH_ANY:
+ case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
skip_data:
if (!(flags & FLAG_CONS)) {
if (flags & FLAG_INDEFINITE_LENGTH) {
@@ -422,8 +432,15 @@ next_op:
pc += asn1_op_lengths[op];
goto next_op;
+ case ASN1_OP_MAYBE_ACT:
+ if (!(flags & FLAG_LAST_MATCHED)) {
+ pc += asn1_op_lengths[op];
+ goto next_op;
+ }
case ASN1_OP_ACT:
ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
+ if (ret < 0)
+ return ret;
pc += asn1_op_lengths[op];
goto next_op;
@@ -431,6 +448,7 @@ next_op:
if (unlikely(jsp <= 0))
goto jump_stack_underflow;
pc = jump_stack[--jsp];
+ flags |= FLAG_MATCHED | FLAG_LAST_MATCHED;
goto next_op;
default:
@@ -438,7 +456,8 @@ next_op:
}
/* Shouldn't reach here */
- pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op);
+ pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n",
+ op, pc);
return -EBADMSG;
data_overrun_error:
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 1298c05ef528..2886ebac6567 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 0211d30d8c39..83c33a5bcffb 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,8 +16,39 @@
#include <linux/kernel.h>
#include <linux/atomic.h>
+#define TEST(bit, op, c_op, val) \
+do { \
+ atomic##bit##_set(&v, v0); \
+ r = v0; \
+ atomic##bit##_##op(val, &v); \
+ r c_op val; \
+ WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n", \
+ (unsigned long long)atomic##bit##_read(&v), \
+ (unsigned long long)r); \
+} while (0)
+
+static __init void test_atomic(void)
+{
+ int v0 = 0xaaa31337;
+ int v1 = 0xdeadbeef;
+ int onestwos = 0x11112222;
+ int one = 1;
+
+ atomic_t v;
+ int r;
+
+ TEST(, add, +=, onestwos);
+ TEST(, add, +=, -one);
+ TEST(, sub, -=, onestwos);
+ TEST(, sub, -=, -one);
+ TEST(, or, |=, v1);
+ TEST(, and, &=, v1);
+ TEST(, xor, ^=, v1);
+ TEST(, andnot, &= ~, v1);
+}
+
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
-static __init int test_atomic64(void)
+static __init void test_atomic64(void)
{
long long v0 = 0xaaa31337c001d00dLL;
long long v1 = 0xdeadbeefdeafcafeLL;
@@ -34,15 +65,14 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r);
BUG_ON(atomic64_read(&v) != r);
- INIT(v0);
- atomic64_add(onestwos, &v);
- r += onestwos;
- BUG_ON(v.counter != r);
-
- INIT(v0);
- atomic64_add(-one, &v);
- r += -one;
- BUG_ON(v.counter != r);
+ TEST(64, add, +=, onestwos);
+ TEST(64, add, +=, -one);
+ TEST(64, sub, -=, onestwos);
+ TEST(64, sub, -=, -one);
+ TEST(64, or, |=, v1);
+ TEST(64, and, &=, v1);
+ TEST(64, xor, ^=, v1);
+ TEST(64, andnot, &= ~, v1);
INIT(v0);
r += onestwos;
@@ -55,16 +85,6 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r);
INIT(v0);
- atomic64_sub(onestwos, &v);
- r -= onestwos;
- BUG_ON(v.counter != r);
-
- INIT(v0);
- atomic64_sub(-one, &v);
- r -= -one;
- BUG_ON(v.counter != r);
-
- INIT(v0);
r -= onestwos;
BUG_ON(atomic64_sub_return(onestwos, &v) != r);
BUG_ON(v.counter != r);
@@ -147,6 +167,12 @@ static __init int test_atomic64(void)
BUG_ON(!atomic64_inc_not_zero(&v));
r += one;
BUG_ON(v.counter != r);
+}
+
+static __init int test_atomics(void)
+{
+ test_atomic();
+ test_atomic64();
#ifdef CONFIG_X86
pr_info("passed for %s platform %s CX8 and %s SSE\n",
@@ -166,4 +192,4 @@ static __init int test_atomic64(void)
return 0;
}
-core_initcall(test_atomic64);
+core_initcall(test_atomics);
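
The TEST() macro above pastes the bit width into the atomic accessor names and mirrors each atomic operation on a plain shadow variable r. As a minimal sketch (not part of the patch), TEST(64, and, &=, v1) expands to roughly:

/* Approximate expansion of TEST(64, and, &=, v1) */
atomic64_set(&v, v0);		/* start the atomic copy from v0    */
r = v0;				/* ...and the plain shadow too      */
atomic64_and(v1, &v);		/* atomic op under test             */
r &= v1;			/* same op, ordinary C semantics    */
WARN(atomic64_read(&v) != r, "%Lx != %Lx\n",
     (unsigned long long)atomic64_read(&v),
     (unsigned long long)r);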
diff --git a/lib/bitmap.c b/lib/bitmap.c
index a578a0189199..814814397cce 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -367,7 +367,8 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
nchunks = nbits = totaldigits = c = 0;
do {
- chunk = ndigits = 0;
+ chunk = 0;
+ ndigits = totaldigits;
/* Get the next chunk of the bitmap */
while (buflen) {
@@ -406,9 +407,9 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
return -EOVERFLOW;
chunk = (chunk << 4) | hex_to_bin(c);
- ndigits++; totaldigits++;
+ totaldigits++;
}
- if (ndigits == 0)
+ if (ndigits == totaldigits)
return -EINVAL;
if (nchunks == 0 && chunk == 0)
continue;
@@ -505,7 +506,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
int nmaskbits)
{
unsigned a, b;
- int c, old_c, totaldigits;
+ int c, old_c, totaldigits, ndigits;
const char __user __force *ubuf = (const char __user __force *)buf;
int at_start, in_range;
@@ -515,6 +516,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
at_start = 1;
in_range = 0;
a = b = 0;
+ ndigits = totaldigits;
/* Get the next cpu# or a range of cpu#'s */
while (buflen) {
@@ -528,23 +530,27 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
if (isspace(c))
continue;
- /*
- * If the last character was a space and the current
- * character isn't '\0', we've got embedded whitespace.
- * This is a no-no, so throw an error.
- */
- if (totaldigits && c && isspace(old_c))
- return -EINVAL;
-
/* A '\0' or a ',' signal the end of a cpu# or range */
if (c == '\0' || c == ',')
break;
+ /*
+ * Whitespace between digits is not allowed, but leading and
+ * trailing whitespace is fine. When old_c is a whitespace
+ * character:
+ * - if totaldigits == ndigits, no digit of this term has been
+ *   seen yet, so the whitespace was leading;
+ * - trailing whitespace never reaches this point: c would have
+ *   been ',' or '\0' and the break above would already have
+ *   left the loop.
+ */
+ if ((totaldigits != ndigits) && isspace(old_c))
+ return -EINVAL;
if (c == '-') {
if (at_start || in_range)
return -EINVAL;
b = 0;
in_range = 1;
+ at_start = 1;
continue;
}
@@ -557,15 +563,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
at_start = 0;
totaldigits++;
}
+ if (ndigits == totaldigits)
+ continue;
+ /* a '-' with no digit after it is invalid */
+ if (at_start && in_range)
+ return -EINVAL;
if (!(a <= b))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
- if (!at_start) {
- while (a <= b) {
- set_bit(a, maskp);
- a++;
- }
+ while (a <= b) {
+ set_bit(a, maskp);
+ a++;
}
} while (buflen && c == ',');
return 0;
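
For reference, a few inputs and the results the reworked parser gives through the bitmap_parselist() wrapper; this is a sketch of the new rules, not code from the patch:

/* Sketch: rules enforced by the reworked __bitmap_parselist() */
DECLARE_BITMAP(mask, 64);

bitmap_parselist("0-3,8", mask, 64);	/* 0: sets bits 0,1,2,3 and 8      */
bitmap_parselist("  1,3 ", mask, 64);	/* 0: head/tail whitespace is ok   */
bitmap_parselist("1 2", mask, 64);	/* -EINVAL: space between digits   */
bitmap_parselist("1-", mask, 64);	/* -EINVAL: no digit after '-'     */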
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 6dd0335ea61b..0234361b24b8 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -743,12 +743,12 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, long len,
+STATIC int INIT __decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *outbuf,
+ unsigned char *outbuf, long olen,
long *pos,
- void(*error)(char *x))
+ void (*error)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
}
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index d4c7891635ec..555c06bf20da 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -1,4 +1,5 @@
#ifdef STATIC
+#define PREBOOT
/* Pre-boot environment: included */
/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
@@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len)
}
/* Included from initramfs et al code */
-STATIC int INIT gunzip(unsigned char *buf, long len,
+STATIC int INIT __gunzip(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *out_buf,
+ unsigned char *out_buf, long out_len,
long *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
int rc;
- size_t out_len;
rc = -1;
if (flush) {
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
- out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
+ if (!out_len)
+ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
@@ -181,4 +182,24 @@ gunzip_nomem1:
return rc; /* returns Z_OK (0) if successful */
}
-#define decompress gunzip
+#ifndef PREBOOT
+STATIC int INIT gunzip(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
+}
+#else
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
+}
+#endif
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 40f66ebe57b7..036fc882cd72 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -196,12 +196,12 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, long in_len,
+STATIC int INIT __decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *output,
+ unsigned char *output, long out_len,
long *posp,
- void(*error)(char *x)
+ void (*error)(char *x)
)
{
return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 0be83af62b88..ed7a1fd819f2 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -620,7 +620,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, long in_len,
num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
- if (p == 0)
+ if (p == NULL)
goto exit_2;
num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
for (i = 0; i < num_probs; i++)
@@ -667,13 +667,12 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, long in_len,
+STATIC int INIT __decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *output,
+ unsigned char *output, long out_len,
long *posp,
- void(*error)(char *x)
- )
+ void (*error)(char *x))
{
return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
}
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index b94a31bdd87d..f4c158e3a022 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -31,6 +31,7 @@
*/
#ifdef STATIC
+#define PREBOOT
#include "lzo/lzo1x_decompress_safe.c"
#else
#include <linux/decompress/unlzo.h>
@@ -287,4 +288,14 @@ exit:
return ret;
}
-#define decompress unlzo
+#ifdef PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long olen,
+ long *pos,
+ void (*error)(char *x))
+{
+ return unlzo(buf, len, fill, flush, out_buf, pos, error);
+}
+#endif
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index b07a78340e9d..25d59a95bd66 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -394,4 +394,14 @@ error_alloc_state:
* This macro is used by architecture-specific files to decompress
* the kernel image.
*/
-#define decompress unxz
+#ifdef XZ_PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long olen,
+ long *pos,
+ void (*error)(char *x))
+{
+ return unxz(buf, len, fill, flush, out_buf, pos, error);
+}
+#endif
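
With this series every compressed-kernel decompressor exports the same preboot entry point, so arch boot code can stay compressor-agnostic. The shared prototype, as added in the hunks above (only the gzip wrapper uses out_len so far; the others accept and ignore it):

STATIC int INIT __decompress(unsigned char *buf, long len,
		long (*fill)(void *, unsigned long),
		long (*flush)(void *, unsigned long),
		unsigned char *out_buf, long out_len,
		long *pos,
		void (*error)(char *x));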
diff --git a/lib/devres.c b/lib/devres.c
index fbe2aac522e6..f13a2468ff39 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap);
* @dev: generic device to handle the resource for
* @res: resource to be handled
*
- * Checks that a resource is a valid memory region, requests the memory region
- * and ioremaps it either as cacheable or as non-cacheable memory depending on
- * the resource's flags. All operations are managed and will be undone on
- * driver detach.
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure. Usage example:
@@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
return IOMEM_ERR_PTR(-EBUSY);
}
- if (res->flags & IORESOURCE_CACHEABLE)
- dest_ptr = devm_ioremap(dev, res->start, size);
- else
- dest_ptr = devm_ioremap_nocache(dev, res->start, size);
-
+ dest_ptr = devm_ioremap(dev, res->start, size);
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
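
The devm_ioremap_resource() kernel-doc above mentions a usage example; a minimal sketch of the common pattern (pdev is a hypothetical platform device):

struct resource *res;
void __iomem *base;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
	return PTR_ERR(base);	/* ERR_PTR-encoded error, never NULL */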
diff --git a/lib/genalloc.c b/lib/genalloc.c
index daf0afb6d979..116a166b096f 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -160,6 +160,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
pool->min_alloc_order = min_alloc_order;
pool->algo = gen_pool_first_fit;
pool->data = NULL;
+ pool->name = NULL;
}
return pool;
}
@@ -252,8 +253,8 @@ void gen_pool_destroy(struct gen_pool *pool)
kfree(chunk);
}
+ kfree_const(pool->name);
kfree(pool);
- return;
}
EXPORT_SYMBOL(gen_pool_destroy);
@@ -570,53 +571,88 @@ static void devm_gen_pool_release(struct device *dev, void *res)
gen_pool_destroy(*(struct gen_pool **)res);
}
+static int devm_gen_pool_match(struct device *dev, void *res, void *data)
+{
+ struct gen_pool **p = res;
+
+ /* NULL data matches only a pool without an assigned name */
+ if (!data && !(*p)->name)
+ return 1;
+
+ if (!data || !(*p)->name)
+ return 0;
+
+ return !strcmp((*p)->name, data);
+}
+
+/**
+ * gen_pool_get - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *gen_pool_get(struct device *dev, const char *name)
+{
+ struct gen_pool **p;
+
+ p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
+ (void *)name);
+ if (!p)
+ return NULL;
+ return *p;
+}
+EXPORT_SYMBOL_GPL(gen_pool_get);
+
/**
* devm_gen_pool_create - managed gen_pool_create
* @dev: device that provides the gen_pool
* @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on, or -1
+ * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
*
* Create a new special memory pool that can be used to manage special purpose
* memory not managed by the regular kmalloc/kfree interface. The pool will be
* automatically destroyed by the device management code.
*/
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
- int nid)
+ int nid, const char *name)
{
struct gen_pool **ptr, *pool;
+ const char *pool_name = NULL;
+
+ /* Check that genpool to be created is uniquely addressed on device */
+ if (gen_pool_get(dev, name))
+ return ERR_PTR(-EINVAL);
+
+ if (name) {
+ pool_name = kstrdup_const(name, GFP_KERNEL);
+ if (!pool_name)
+ return ERR_PTR(-ENOMEM);
+ }
ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
- return NULL;
+ goto free_pool_name;
pool = gen_pool_create(min_alloc_order, nid);
- if (pool) {
- *ptr = pool;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
+ if (!pool)
+ goto free_devres;
+
+ *ptr = pool;
+ pool->name = pool_name;
+ devres_add(dev, ptr);
return pool;
-}
-EXPORT_SYMBOL(devm_gen_pool_create);
-/**
- * gen_pool_get - Obtain the gen_pool (if any) for a device
- * @dev: device to retrieve the gen_pool from
- *
- * Returns the gen_pool for the device if one is present, or NULL.
- */
-struct gen_pool *gen_pool_get(struct device *dev)
-{
- struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
- NULL);
+free_devres:
+ devres_free(ptr);
+free_pool_name:
+ kfree_const(pool_name);
- if (!p)
- return NULL;
- return *p;
+ return ERR_PTR(-ENOMEM);
}
-EXPORT_SYMBOL_GPL(gen_pool_get);
+EXPORT_SYMBOL(devm_gen_pool_create);
#ifdef CONFIG_OF
/**
@@ -633,16 +669,30 @@ struct gen_pool *of_gen_pool_get(struct device_node *np,
const char *propname, int index)
{
struct platform_device *pdev;
- struct device_node *np_pool;
+ struct device_node *np_pool, *parent;
+ const char *name = NULL;
+ struct gen_pool *pool = NULL;
np_pool = of_parse_phandle(np, propname, index);
if (!np_pool)
return NULL;
+
pdev = of_find_device_by_node(np_pool);
+ if (!pdev) {
+ /* Check if named gen_pool is created by parent node device */
+ parent = of_get_parent(np_pool);
+ pdev = of_find_device_by_node(parent);
+ of_node_put(parent);
+
+ of_property_read_string(np_pool, "label", &name);
+ if (!name)
+ name = np_pool->name;
+ }
+ if (pdev)
+ pool = gen_pool_get(&pdev->dev, name);
of_node_put(np_pool);
- if (!pdev)
- return NULL;
- return gen_pool_get(&pdev->dev);
+
+ return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
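
With the new @name argument a device can own several gen_pools and address each one individually. A minimal sketch, assuming a driver-chosen pool name "rx-buf" and a 256-byte allocation granularity (both hypothetical):

/* Sketch: create a named pool, then look it up again by name. */
struct gen_pool *pool;

pool = devm_gen_pool_create(dev, ilog2(256), NUMA_NO_NODE, "rx-buf");
if (IS_ERR(pool))
	return PTR_ERR(pool);	/* note: now ERR_PTR on failure, not NULL */

pool = gen_pool_get(dev, "rx-buf");	/* returns NULL if absent */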
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index ff19f66d3f7f..b1c93e94ca7a 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
static inline bool need_flush(struct iommu_map_table *iommu)
{
- return (iommu->lazy_flush != NULL &&
- (iommu->flags & IOMMU_NEED_FLUSH) != 0);
+ return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}
static inline void set_flush(struct iommu_map_table *iommu)
@@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
goto bail;
}
}
- if (n < pool->hint || need_flush(iommu)) {
+ if (iommu->lazy_flush &&
+ (n < pool->hint || need_flush(iommu))) {
clear_flush(iommu);
iommu->lazy_flush(iommu);
}
diff --git a/lib/klist.c b/lib/klist.c
index 89b485a2a58d..d74cf7a29afd 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -324,6 +324,47 @@ static struct klist_node *to_klist_node(struct list_head *n)
}
/**
+ * klist_prev - Ante up prev node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the prev node, increment its reference
+ * count, drop the lock, and return that prev node.
+ */
+struct klist_node *klist_prev(struct klist_iter *i)
+{
+ void (*put)(struct klist_node *) = i->i_klist->put;
+ struct klist_node *last = i->i_cur;
+ struct klist_node *prev;
+
+ spin_lock(&i->i_klist->k_lock);
+
+ if (last) {
+ prev = to_klist_node(last->n_node.prev);
+ if (!klist_dec_and_del(last))
+ put = NULL;
+ } else
+ prev = to_klist_node(i->i_klist->k_list.prev);
+
+ i->i_cur = NULL;
+ while (prev != to_klist_node(&i->i_klist->k_list)) {
+ if (likely(!knode_dead(prev))) {
+ kref_get(&prev->n_ref);
+ i->i_cur = prev;
+ break;
+ }
+ prev = to_klist_node(prev->n_node.prev);
+ }
+
+ spin_unlock(&i->i_klist->k_lock);
+
+ if (put && last)
+ put(last);
+ return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_prev);
+
+/**
* klist_next - Ante up next node in list.
* @i: Iterator structure.
*
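
klist_prev() mirrors klist_next(), so an iterator can now walk a klist from either end. A minimal sketch of a tail-to-head walk (mylist and do_something() are hypothetical):

/* Sketch: iterate a klist backwards. */
struct klist_iter iter;
struct klist_node *n;

klist_iter_init(&mylist, &iter);	/* i_cur == NULL: start at the tail */
while ((n = klist_prev(&iter)))
	do_something(n);
klist_iter_exit(&iter);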
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index ec8da78df9be..94be244e8441 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -152,7 +152,7 @@ int kstrtoll(const char *s, unsigned int base, long long *res)
rv = _kstrtoull(s + 1, base, &tmp);
if (rv < 0)
return rv;
- if ((long long)(-tmp) >= 0)
+ if ((long long)-tmp > 0)
return -ERANGE;
*res = -tmp;
} else {
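
The relaxed check makes "-0" parse as plain 0: with tmp == 0, (long long)-tmp is 0, which no longer trips the strict > 0 overflow test, while genuine underflows such as "-9223372036854775809" still wrap to a positive value and fail. A sketch of the visible change (the test-kstrtox.c update further down moves "-0" into the "ok" table accordingly):

long long v;

kstrtoll("-0", 10, &v);				/* now returns 0, v == 0 */
kstrtoll("-9223372036854775809", 10, &v);	/* still -ERANGE         */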
diff --git a/lib/lockref.c b/lib/lockref.c
index 494994bf17c8..5a92189ad711 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,14 +4,6 @@
#if USE_CMPXCHG_LOCKREF
/*
- * Allow weakly-ordered memory architectures to provide barrier-less
- * cmpxchg semantics for lockref updates.
- */
-#ifndef cmpxchg64_relaxed
-# define cmpxchg64_relaxed cmpxchg64
-#endif
-
-/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
*/
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index bc0a1da8afba..95c52a95259e 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -146,18 +146,25 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
uint8_t *p;
mpi_limb_t alimb;
unsigned int n = mpi_get_size(a);
- int i;
+ int i, lzeros = 0;
- if (buf_len < n || !buf)
+ if (buf_len < n || !buf || !nbytes)
return -EINVAL;
if (sign)
*sign = a->sign;
- if (nbytes)
- *nbytes = n;
+ p = (void *)&a->d[a->nlimbs] - 1;
+
+ for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+ if (!*p)
+ lzeros++;
+ else
+ break;
+ }
p = buf;
+ *nbytes = n - lzeros;
for (i = a->nlimbs - 1; i >= 0; i--) {
alimb = a->d[i];
@@ -178,6 +185,19 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
#else
#error please implement for this limb size.
#endif
+
+ if (lzeros > 0) {
+ if (lzeros >= sizeof(alimb)) {
+ p -= sizeof(alimb);
+ } else {
+ mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+ mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+ + lzeros;
+ *limb1 = *limb2;
+ p -= lzeros;
+ }
+ lzeros -= sizeof(alimb);
+ }
}
return 0;
}
@@ -197,7 +217,7 @@ EXPORT_SYMBOL_GPL(mpi_read_buffer);
*/
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
{
- uint8_t *buf, *p;
+ uint8_t *buf;
unsigned int n;
int ret;
@@ -220,14 +240,6 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
kfree(buf);
return NULL;
}
-
- /* this is sub-optimal but we need to do the shift operation
- * because the caller has to free the returned buffer */
- for (p = buf; !*p && *nbytes; p++, --*nbytes)
- ;
- if (p != buf)
- memmove(buf, p, *nbytes);
-
return buf;
}
EXPORT_SYMBOL_GPL(mpi_get_buffer);
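
Leading-zero stripping now happens inside mpi_read_buffer() itself, so mpi_get_buffer() no longer needs its memmove pass and *nbytes comes back already trimmed. A minimal usage sketch (a is some previously built MPI, hypothetical here):

unsigned nbytes;
int sign;
void *buf = mpi_get_buffer(a, &nbytes, &sign);

if (!buf)
	return -ENOMEM;
/* buf[0..nbytes-1]: big-endian value, leading zero bytes stripped */
kfree(buf);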
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
new file mode 100644
index 000000000000..88d3d32e5923
--- /dev/null
+++ b/lib/nmi_backtrace.c
@@ -0,0 +1,162 @@
+/*
+ * NMI backtrace support
+ *
+ * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
+ * with the following header:
+ *
+ * HW NMI watchdog support
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Arch specific calls to support NMI watchdog
+ *
+ * Bits copied from original nmi.c file
+ */
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <linux/nmi.h>
+#include <linux/seq_buf.h>
+
+#ifdef arch_trigger_all_cpu_backtrace
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+static cpumask_t printtrace_mask;
+
+#define NMI_BUF_SIZE 4096
+
+struct nmi_seq_buf {
+ unsigned char buffer[NMI_BUF_SIZE];
+ struct seq_buf seq;
+};
+
+/* Safe printing in NMI context */
+static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
+{
+ const char *buf = s->buffer + start;
+
+ printk("%.*s", (end - start) + 1, buf);
+}
+
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+ void (*raise)(cpumask_t *mask))
+{
+ struct nmi_seq_buf *s;
+ int i, cpu, this_cpu = get_cpu();
+
+ if (test_and_set_bit(0, &backtrace_flag)) {
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't output a duplicate CPU dump.
+ */
+ put_cpu();
+ return;
+ }
+
+ cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+ if (!include_self)
+ cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+
+ cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
+
+ /*
+ * Set up per_cpu seq_buf buffers that the NMIs running on the other
+ * CPUs will write to.
+ */
+ for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
+ s = &per_cpu(nmi_print_seq, cpu);
+ seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
+ }
+
+ if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+ pr_info("Sending NMI to %s CPUs:\n",
+ (include_self ? "all" : "other"));
+ raise(to_cpumask(backtrace_mask));
+ }
+
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(to_cpumask(backtrace_mask)))
+ break;
+ mdelay(1);
+ touch_softlockup_watchdog();
+ }
+
+ /*
+ * Now that all the NMIs have triggered, we can dump out their
+ * back traces safely to the console.
+ */
+ for_each_cpu(cpu, &printtrace_mask) {
+ int len, last_i = 0;
+
+ s = &per_cpu(nmi_print_seq, cpu);
+ len = seq_buf_used(&s->seq);
+ if (!len)
+ continue;
+
+ /* Print line by line. */
+ for (i = 0; i < len; i++) {
+ if (s->buffer[i] == '\n') {
+ print_seq_line(s, last_i, i);
+ last_i = i + 1;
+ }
+ }
+ /* Check if there was a partial line. */
+ if (last_i < len) {
+ print_seq_line(s, last_i, len - 1);
+ pr_cont("\n");
+ }
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_atomic();
+ put_cpu();
+}
+
+/*
+ * It is not safe to call printk() directly from NMI handlers.
+ * It may be fine if the NMI detected a lock up and we have no choice
+ * but to do so, but doing a NMI on all other CPUs to get a back trace
+ * can be done with a sysrq-l. We don't want that to lock up, which
+ * can happen if the NMI interrupts a printk in progress.
+ *
+ * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
+ * the content into a per cpu seq_buf buffer. Then when the NMIs are
+ * all done, we can safely dump the contents of the seq_buf to a printk()
+ * from a non NMI context.
+ */
+static int nmi_vprintk(const char *fmt, va_list args)
+{
+ struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+ unsigned int len = seq_buf_used(&s->seq);
+
+ seq_buf_vprintf(&s->seq, fmt, args);
+ return seq_buf_used(&s->seq) - len;
+}
+
+bool nmi_cpu_backtrace(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ printk_func_t printk_func_save = this_cpu_read(printk_func);
+
+ /* Replace printk to write into the NMI seq */
+ this_cpu_write(printk_func, nmi_vprintk);
+ pr_warn("NMI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ this_cpu_write(printk_func, printk_func_save);
+
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ return true;
+ }
+
+ return false;
+}
+NOKPROBE_SYMBOL(nmi_cpu_backtrace);
+#endif
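
An architecture opts in by defining arch_trigger_all_cpu_backtrace and passing nmi_trigger_all_cpu_backtrace() a raise() callback that actually delivers the NMIs. A sketch of the glue, with delivery left as an arch-specific stub (raise_nmi() is hypothetical):

static void raise_nmi(cpumask_t *mask)
{
	/* arch-specific: send an NMI/IPI to every CPU in *mask; each
	 * handler is expected to call nmi_cpu_backtrace(regs). */
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
}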
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index bcce5f149310..c10fba461454 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -41,17 +41,59 @@ void __iomem *pci_iomap_range(struct pci_dev *dev,
len = maxlen;
if (flags & IORESOURCE_IO)
return __pci_ioport_map(dev, start, len);
- if (flags & IORESOURCE_MEM) {
- if (flags & IORESOURCE_CACHEABLE)
- return ioremap(start, len);
- return ioremap_nocache(start, len);
- }
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
/* What? */
return NULL;
}
EXPORT_SYMBOL(pci_iomap_range);
/**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details if this is a MMIO or PIO address space and will just do what
+ * you expect from them in the correct way. When possible write combining
+ * is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (flags & IORESOURCE_IO)
+ return NULL;
+
+ if (len <= offset || !start)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_MEM)
+ return ioremap_wc(start, len);
+
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
* pci_iomap - create a virtual mapping cookie for a PCI BAR
* @dev: PCI device that owns the BAR
* @bar: BAR number
@@ -70,4 +112,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is a MMIO or PIO address space and will just
+ * do what you expect from them in the correct way. When possible, write
+ * combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
#endif /* CONFIG_PCI */
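
pci_iomap_wc() is meant for prefetchable BARs (framebuffers and the like) where write combining is safe and profitable. A minimal driver sketch (BAR 1 as a framebuffer is a hypothetical choice):

void __iomem *fb;

fb = pci_iomap_wc(pdev, 1, 0);	/* 0: map the whole BAR */
if (!fb)
	return -ENOMEM;
iowrite32(0xffffffff, fb);	/* writes may now be combined */
pci_iounmap(pdev, fb);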
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
index d9ad6ee284f4..7076ef1ba3dd 100644
--- a/lib/raid6/neon.c
+++ b/lib/raid6/neon.c
@@ -40,9 +40,20 @@
(unsigned long)bytes, ptrs); \
kernel_neon_end(); \
} \
+ static void raid6_neon ## _n ## _xor_syndrome(int disks, \
+ int start, int stop, \
+ size_t bytes, void **ptrs) \
+ { \
+ void raid6_neon ## _n ## _xor_syndrome_real(int, \
+ int, int, unsigned long, void**); \
+ kernel_neon_begin(); \
+ raid6_neon ## _n ## _xor_syndrome_real(disks, \
+ start, stop, (unsigned long)bytes, ptrs); \
+ kernel_neon_end(); \
+ } \
struct raid6_calls const raid6_neonx ## _n = { \
raid6_neon ## _n ## _gen_syndrome, \
- NULL, /* XOR not yet implemented */ \
+ raid6_neon ## _n ## _xor_syndrome, \
raid6_have_neon, \
"neonx" #_n, \
0 \
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index 1b9ed793342d..4fa51b761dd0 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -3,6 +3,7 @@
* neon.uc - RAID-6 syndrome calculation using ARM NEON instructions
*
* Copyright (C) 2012 Rob Herring
+ * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Based on altivec.uc:
* Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
@@ -78,3 +79,48 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
vst1q_u8(&q[d+NSIZE*$$], wq$$);
}
}
+
+void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs)
+{
+ uint8_t **dptr = (uint8_t **)ptrs;
+ uint8_t *p, *q;
+ int d, z, z0;
+
+ register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+ const unative_t x1d = NBYTES(0x1d);
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+ wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
+
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+ wp$$ = veorq_u8(wp$$, wd$$);
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ w1$$ = veorq_u8(w1$$, w2$$);
+ wq$$ = veorq_u8(w1$$, wd$$);
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ }
+ w1$$ = vld1q_u8(&q[d+NSIZE*$$]);
+ wq$$ = veorq_u8(wq$$, w1$$);
+
+ vst1q_u8(&p[d+NSIZE*$$], wp$$);
+ vst1q_u8(&q[d+NSIZE*$$], wq$$);
+ }
+}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index cc0c69710dcf..a54ff8949f91 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -187,10 +187,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
head = rht_dereference_bucket(new_tbl->buckets[new_hash],
new_tbl, new_hash);
- if (rht_is_a_nulls(head))
- INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
- else
- RCU_INIT_POINTER(entry->next, head);
+ RCU_INIT_POINTER(entry->next, head);
rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
spin_unlock(new_bucket_lock);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f56878..bafa9933fa76 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@ EXPORT_SYMBOL(sg_nents_for_len);
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
- struct scatterlist *ret = &sgl[nents - 1];
-#else
struct scatterlist *sg, *ret = NULL;
unsigned int i;
for_each_sg(sgl, sg, nents, i)
ret = sg;
-#endif
#ifdef CONFIG_DEBUG_SG
BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 000000000000..b063410c3593
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+ struct scatterlist *in_sg0;
+ int nents;
+ off_t skip_sg0;
+ unsigned int length_last_sg;
+
+ struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+ off_t skip, const size_t *sizes,
+ struct sg_splitter *splitters, bool mapped)
+{
+ int i;
+ unsigned int sglen;
+ size_t size = sizes[0], len;
+ struct sg_splitter *curr = splitters;
+ struct scatterlist *sg;
+
+ for (i = 0; i < nb_splits; i++) {
+ splitters[i].in_sg0 = NULL;
+ splitters[i].nents = 0;
+ }
+
+ for_each_sg(in, sg, nents, i) {
+ sglen = mapped ? sg_dma_len(sg) : sg->length;
+ if (skip > sglen) {
+ skip -= sglen;
+ continue;
+ }
+
+ len = min_t(size_t, size, sglen - skip);
+ if (!curr->in_sg0) {
+ curr->in_sg0 = sg;
+ curr->skip_sg0 = skip;
+ }
+ size -= len;
+ curr->nents++;
+ curr->length_last_sg = len;
+
+ while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+ curr++;
+ size = *(++sizes);
+ skip += len;
+ len = min_t(size_t, size, sglen - skip);
+
+ curr->in_sg0 = sg;
+ curr->skip_sg0 = skip;
+ curr->nents = 1;
+ curr->length_last_sg = len;
+ size -= len;
+ }
+ skip = 0;
+
+ if (!size && --nb_splits > 0) {
+ curr++;
+ size = *(++sizes);
+ }
+
+ if (!nb_splits)
+ break;
+ }
+
+ return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+ int i, j;
+ struct scatterlist *in_sg, *out_sg;
+ struct sg_splitter *split;
+
+ for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+ in_sg = split->in_sg0;
+ out_sg = split->out_sg;
+ for (j = 0; j < split->nents; j++, out_sg++) {
+ *out_sg = *in_sg;
+ if (!j) {
+ out_sg->offset += split->skip_sg0;
+ out_sg->length -= split->skip_sg0;
+ } else {
+ out_sg->offset = 0;
+ }
+ sg_dma_address(out_sg) = 0;
+ sg_dma_len(out_sg) = 0;
+ in_sg = sg_next(in_sg);
+ }
+ out_sg[-1].length = split->length_last_sg;
+ sg_mark_end(out_sg - 1);
+ }
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+ int i, j;
+ struct scatterlist *in_sg, *out_sg;
+ struct sg_splitter *split;
+
+ for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+ in_sg = split->in_sg0;
+ out_sg = split->out_sg;
+ for (j = 0; j < split->nents; j++, out_sg++) {
+ sg_dma_address(out_sg) = sg_dma_address(in_sg);
+ sg_dma_len(out_sg) = sg_dma_len(in_sg);
+ if (!j) {
+ sg_dma_address(out_sg) += split->skip_sg0;
+ sg_dma_len(out_sg) -= split->skip_sg0;
+ }
+ in_sg = sg_next(in_sg);
+ }
+ sg_dma_len(--out_sg) = split->length_last_sg;
+ }
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array in which to store the allocated output sg lists
+ * @out_mapped_nents: array receiving the number of mapped sg entries of
+ *	each output sg list. May be NULL if the input is not mapped
+ *	(@in_mapped_nents == 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * @in is split into:
+ * - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ * - @out[1], which covers bytes [@skip + @split_sizes[0] ..
+ *   @skip + @split_sizes[0] + @split_sizes[1] - 1]
+ * and so on.
+ * It is the caller's duty to kfree() the @out array members.
+ *
+ * Returns 0 upon success, or a negative error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+ const off_t skip, const int nb_splits,
+ const size_t *split_sizes,
+ struct scatterlist **out, int *out_mapped_nents,
+ gfp_t gfp_mask)
+{
+ int i, ret;
+ struct sg_splitter *splitters;
+
+ splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+ if (!splitters)
+ return -ENOMEM;
+
+ ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+ splitters, false);
+ if (ret < 0)
+ goto err;
+
+ ret = -ENOMEM;
+ for (i = 0; i < nb_splits; i++) {
+ splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+ sizeof(struct scatterlist),
+ gfp_mask);
+ if (!splitters[i].out_sg)
+ goto err;
+ }
+
+ /*
+ * The order of these 3 calls is important and should be kept.
+ */
+ sg_split_phys(splitters, nb_splits);
+ ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+ split_sizes, splitters, true);
+ if (ret < 0)
+ goto err;
+ sg_split_mapped(splitters, nb_splits);
+
+ for (i = 0; i < nb_splits; i++) {
+ out[i] = splitters[i].out_sg;
+ if (out_mapped_nents)
+ out_mapped_nents[i] = splitters[i].nents;
+ }
+
+ kfree(splitters);
+ return 0;
+
+err:
+ for (i = 0; i < nb_splits; i++)
+ kfree(splitters[i].out_sg);
+ kfree(splitters);
+ return ret;
+}
+EXPORT_SYMBOL(sg_split);
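
A typical caller splits one DMA-mapped scatterlist between two channels. A minimal sketch, assuming sgl and mapped_nents came from a prior dma_map_sg() (names hypothetical):

size_t sizes[2] = { 4096, 4096 };	/* must fit within the mapped length */
struct scatterlist *out[2];
int out_nents[2];
int ret;

ret = sg_split(sgl, mapped_nents, 0, 2, sizes, out, out_nents, GFP_KERNEL);
if (ret)
	return ret;
/* hand out[0] and out[1] to the two DMA channels */
kfree(out[0]);	/* caller's duty, per the kernel-doc above */
kfree(out[1]);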
diff --git a/lib/show_mem.c b/lib/show_mem.c
index adc98e1825ba..1feed6a2b12a 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -38,11 +38,9 @@ void show_mem(unsigned int filter)
printk("%lu pages RAM\n", total);
printk("%lu pages HighMem/MovableOnly\n", highmem);
+ printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
- printk("%lu pages reserved\n", (reserved - totalcma_pages));
printk("%lu pages cma reserved\n", totalcma_pages);
-#else
- printk("%lu pages reserved\n", reserved);
#endif
#ifdef CONFIG_QUICKLIST
printk("%lu pages in pagetable cache\n",
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index c98ae818eb4e..5939f63d90cd 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -59,7 +59,11 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
}
exp = divisor[units] / (u32)blk_size;
- if (size >= exp) {
+ /*
+ * size must be strictly greater than exp here to ensure that remainder
+ * is greater than divisor[units] coming out of the if below.
+ */
+ if (size > exp) {
remainder = do_div(size, divisor[units]);
remainder *= blk_size;
i++;
@@ -410,7 +414,7 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* @dst: destination buffer (escaped)
* @osz: destination buffer size
* @flags: combination of the flags (bitwise OR):
- * %ESCAPE_SPACE:
+ * %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
@@ -432,16 +436,18 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
- * @esc: NULL-terminated string of characters any of which, if found in
- * the source, has to be escaped
+ * @only: NULL-terminated string containing characters used to limit
+ * the selected escape class. If characters are included in @only
+ * that would not normally be escaped by the classes selected
+ * in @flags, they will be copied to @dst unescaped.
*
* Description:
* The process of escaping byte buffer includes several parts. They are applied
* in the following sequence.
* 1. The character is matched to the printable class, if asked, and in
* case of match it passes through to the output.
- * 2. The character is not matched to the one from @esc string and thus
- * must go as is to the output.
+ * 2. The character is not matched to the one from @only string and thus
+ * must go as-is to the output.
* 3. The character is checked if it falls into the class given by @flags.
* %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
* character. Note that they actually can't go together, otherwise
@@ -458,11 +464,11 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* dst for a '\0' terminator if and only if ret < osz.
*/
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
- unsigned int flags, const char *esc)
+ unsigned int flags, const char *only)
{
char *p = dst;
char *end = p + osz;
- bool is_dict = esc && *esc;
+ bool is_dict = only && *only;
while (isz--) {
unsigned char c = *src++;
@@ -471,7 +477,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
* Apply rules in the following sequence:
* - the character is printable, when @flags has
* %ESCAPE_NP bit set
- * - the @esc string is supplied and does not contain a
+ * - the @only string is supplied and does not contain a
* character under question
* - the character doesn't fall into a class of symbols
* defined by given @flags
@@ -479,7 +485,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
* output buffer.
*/
if ((flags & ESCAPE_NP && isprint(c)) ||
- (is_dict && !strchr(esc, c))) {
+ (is_dict && !strchr(only, c))) {
/* do nothing */
} else {
if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
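
The renamed @only argument narrows an escape class to a chosen set of characters. A minimal sketch: escape newlines only, letting other whitespace pass through:

char out[16];
int ret;

/* '\t' is not in @only, so it is copied as-is; '\n' becomes "\\n". */
ret = string_escape_mem("a\tb\nc", 5, out, sizeof(out),
			ESCAPE_SPACE, "\n");
/* ret == 6; out holds: a, '\t', b, '\\', 'n', c (no trailing NUL) */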
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index 4137bca5f8e8..f355f67169b6 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -260,6 +260,7 @@ static void __init test_kstrtoll_ok(void)
{"4294967297", 10, 4294967297LL},
{"9223372036854775807", 10, 9223372036854775807LL},
+ {"-0", 10, 0LL},
{"-1", 10, -1LL},
{"-2", 10, -2LL},
{"-9223372036854775808", 10, LLONG_MIN},
@@ -277,11 +278,6 @@ static void __init test_kstrtoll_fail(void)
{"-9223372036854775809", 10},
{"-18446744073709551614", 10},
{"-18446744073709551615", 10},
- /* negative zero isn't an integer in Linux */
- {"-0", 0},
- {"-0", 8},
- {"-0", 10},
- {"-0", 16},
/* sign is first character if any */
{"-+1", 0},
{"-+1", 8},
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 098c08eddfab..c1efb1b61017 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -65,7 +65,7 @@ static noinline void __init kmalloc_node_oob_right(void)
kfree(ptr);
}
-static noinline void __init kmalloc_large_oob_rigth(void)
+static noinline void __init kmalloc_large_oob_right(void)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
@@ -114,7 +114,7 @@ static noinline void __init kmalloc_oob_krealloc_less(void)
kfree(ptr1);
return;
}
- ptr2[size1] = 'x';
+ ptr2[size2] = 'x';
kfree(ptr2);
}
@@ -259,7 +259,7 @@ static int __init kmalloc_tests_init(void)
kmalloc_oob_right();
kmalloc_oob_left();
kmalloc_node_oob_right();
- kmalloc_large_oob_rigth();
+ kmalloc_large_oob_right();
kmalloc_oob_krealloc_more();
kmalloc_oob_krealloc_less();
kmalloc_oob_16();
diff --git a/lib/test_static_key_base.c b/lib/test_static_key_base.c
new file mode 100644
index 000000000000..729447aea02f
--- /dev/null
+++ b/lib/test_static_key_base.c
@@ -0,0 +1,68 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ * Jason Baron <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_old_true_key);
+struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_inv_old_true_key);
+struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_old_false_key);
+struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_inv_old_false_key);
+
+/* new keys */
+DEFINE_STATIC_KEY_TRUE(base_true_key);
+EXPORT_SYMBOL_GPL(base_true_key);
+DEFINE_STATIC_KEY_TRUE(base_inv_true_key);
+EXPORT_SYMBOL_GPL(base_inv_true_key);
+DEFINE_STATIC_KEY_FALSE(base_false_key);
+EXPORT_SYMBOL_GPL(base_false_key);
+DEFINE_STATIC_KEY_FALSE(base_inv_false_key);
+EXPORT_SYMBOL_GPL(base_inv_false_key);
+
+static void invert_key(struct static_key *key)
+{
+ if (static_key_enabled(key))
+ static_key_disable(key);
+ else
+ static_key_enable(key);
+}
+
+static int __init test_static_key_base_init(void)
+{
+ invert_key(&base_inv_old_true_key);
+ invert_key(&base_inv_old_false_key);
+ invert_key(&base_inv_true_key.key);
+ invert_key(&base_inv_false_key.key);
+
+ return 0;
+}
+
+static void __exit test_static_key_base_exit(void)
+{
+}
+
+module_init(test_static_key_base_init);
+module_exit(test_static_key_base_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
new file mode 100644
index 000000000000..c61b299e367f
--- /dev/null
+++ b/lib/test_static_keys.c
@@ -0,0 +1,225 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ * Jason Baron <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key old_true_key = STATIC_KEY_INIT_TRUE;
+struct static_key old_false_key = STATIC_KEY_INIT_FALSE;
+
+/* new api */
+DEFINE_STATIC_KEY_TRUE(true_key);
+DEFINE_STATIC_KEY_FALSE(false_key);
+
+/* external */
+extern struct static_key base_old_true_key;
+extern struct static_key base_inv_old_true_key;
+extern struct static_key base_old_false_key;
+extern struct static_key base_inv_old_false_key;
+
+/* new api */
+extern struct static_key_true base_true_key;
+extern struct static_key_true base_inv_true_key;
+extern struct static_key_false base_false_key;
+extern struct static_key_false base_inv_false_key;
+
+
+struct test_key {
+ bool init_state;
+ struct static_key *key;
+ bool (*test_key)(void);
+};
+
+#define test_key_func(key, branch) \
+ ({bool func(void) { return branch(key); } func; })
+
+static void invert_key(struct static_key *key)
+{
+ if (static_key_enabled(key))
+ static_key_disable(key);
+ else
+ static_key_enable(key);
+}
+
+static void invert_keys(struct test_key *keys, int size)
+{
+ struct static_key *previous = NULL;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (previous != keys[i].key) {
+ invert_key(keys[i].key);
+ previous = keys[i].key;
+ }
+ }
+}
+
+static int verify_keys(struct test_key *keys, int size, bool invert)
+{
+ int i;
+ bool ret, init;
+
+ for (i = 0; i < size; i++) {
+ ret = static_key_enabled(keys[i].key);
+ init = keys[i].init_state;
+ if (ret != (invert ? !init : init))
+ return -EINVAL;
+ ret = keys[i].test_key();
+ if (static_key_enabled(keys[i].key)) {
+ if (!ret)
+ return -EINVAL;
+ } else {
+ if (ret)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int __init test_static_key_init(void)
+{
+ int ret;
+ int size;
+
+ struct test_key static_key_tests[] = {
+ /* internal keys - old keys */
+ {
+ .init_state = true,
+ .key = &old_true_key,
+ .test_key = test_key_func(&old_true_key, static_key_true),
+ },
+ {
+ .init_state = false,
+ .key = &old_false_key,
+ .test_key = test_key_func(&old_false_key, static_key_false),
+ },
+ /* internal keys - new keys */
+ {
+ .init_state = true,
+ .key = &true_key.key,
+ .test_key = test_key_func(&true_key, static_branch_likely),
+ },
+ {
+ .init_state = true,
+ .key = &true_key.key,
+ .test_key = test_key_func(&true_key, static_branch_unlikely),
+ },
+ {
+ .init_state = false,
+ .key = &false_key.key,
+ .test_key = test_key_func(&false_key, static_branch_likely),
+ },
+ {
+ .init_state = false,
+ .key = &false_key.key,
+ .test_key = test_key_func(&false_key, static_branch_unlikely),
+ },
+ /* external keys - old keys */
+ {
+ .init_state = true,
+ .key = &base_old_true_key,
+ .test_key = test_key_func(&base_old_true_key, static_key_true),
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_old_true_key,
+ .test_key = test_key_func(&base_inv_old_true_key, static_key_true),
+ },
+ {
+ .init_state = false,
+ .key = &base_old_false_key,
+ .test_key = test_key_func(&base_old_false_key, static_key_false),
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_old_false_key,
+ .test_key = test_key_func(&base_inv_old_false_key, static_key_false),
+ },
+ /* external keys - new keys */
+ {
+ .init_state = true,
+ .key = &base_true_key.key,
+ .test_key = test_key_func(&base_true_key, static_branch_likely),
+ },
+ {
+ .init_state = true,
+ .key = &base_true_key.key,
+ .test_key = test_key_func(&base_true_key, static_branch_unlikely),
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_true_key.key,
+ .test_key = test_key_func(&base_inv_true_key, static_branch_likely),
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_true_key.key,
+ .test_key = test_key_func(&base_inv_true_key, static_branch_unlikely),
+ },
+ {
+ .init_state = false,
+ .key = &base_false_key.key,
+ .test_key = test_key_func(&base_false_key, static_branch_likely),
+ },
+ {
+ .init_state = false,
+ .key = &base_false_key.key,
+ .test_key = test_key_func(&base_false_key, static_branch_unlikely),
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_false_key.key,
+ .test_key = test_key_func(&base_inv_false_key, static_branch_likely),
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_false_key.key,
+ .test_key = test_key_func(&base_inv_false_key, static_branch_unlikely),
+ },
+ };
+
+ size = ARRAY_SIZE(static_key_tests);
+
+ ret = verify_keys(static_key_tests, size, false);
+ if (ret)
+ goto out;
+
+ invert_keys(static_key_tests, size);
+ ret = verify_keys(static_key_tests, size, true);
+ if (ret)
+ goto out;
+
+ invert_keys(static_key_tests, size);
+ ret = verify_keys(static_key_tests, size, false);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static void __exit test_static_key_exit(void)
+{
+}
+
+module_init(test_static_key_init);
+module_exit(test_static_key_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index da39c608a28c..95cd63b43b99 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
*/
#include <stdarg.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c
index ddf348299f24..9b1756b12743 100644
--- a/lib/zlib_deflate/deftree.c
+++ b/lib/zlib_deflate/deftree.c
@@ -35,6 +35,7 @@
/* #include "deflate.h" */
#include <linux/zutil.h>
+#include <linux/bitrev.h>
#include "defutil.h"
#ifdef DEBUG_ZLIB
@@ -146,7 +147,6 @@ static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
static void compress_block (deflate_state *s, ct_data *ltree,
ct_data *dtree);
static void set_data_type (deflate_state *s);
-static unsigned bi_reverse (unsigned value, int length);
static void bi_windup (deflate_state *s);
static void bi_flush (deflate_state *s);
static void copy_block (deflate_state *s, char *buf, unsigned len,
@@ -284,7 +284,7 @@ static void tr_static_init(void)
/* The static distance tree is trivial: */
for (n = 0; n < D_CODES; n++) {
static_dtree[n].Len = 5;
- static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+ static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5);
}
static_init_done = 1;
}
@@ -520,7 +520,7 @@ static void gen_codes(
int len = tree[n].Len;
if (len == 0) continue;
/* Now reverse the bits */
- tree[n].Code = bi_reverse(next_code[len]++, len);
+ tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len);
Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
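
bitrev32() reverses all 32 bits, so keeping only a len-bit reversal requires the shift by (32 - len); the result matches the removed bi_reverse() for every 1 <= len <= 15. A worked check for the static distance tree case (len == 5):

/* n = 6 is 00110b; reversed over 5 bits that is 01100b == 0xC. */
u32 code = bitrev32((u32)6) >> (32 - 5);	/* 0x60000000 >> 27 == 0xC */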
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index b640b6402e99..a8c370897c9f 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -293,22 +293,6 @@ void zlib_tr_stored_type_only (deflate_state *);
}
/* ===========================================================================
- * Reverse the first len bits of a code, using straightforward code (a faster
- * method would use a table)
- * IN assertion: 1 <= len <= 15
- */
-static inline unsigned bi_reverse(unsigned code, /* the value to invert */
- int len) /* its bit length */
-{
- register unsigned res = 0;
- do {
- res |= code & 1;
- code >>= 1, res <<= 1;
- } while (--len > 0);
- return res >> 1;
-}
-
-/* ===========================================================================
* Flush the bit buffer, keeping at most 7 bits in it.
*/
static inline void bi_flush(deflate_state *s)